/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

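/* dut_mode exposes Device Under Test mode as a boolean debugfs attribute:
 * reads return 'Y' or 'N' from the HCI_DUT_MODE flag, and writing a boolean
 * (while the device is up) toggles the mode by synchronously sending
 * HCI_OP_ENABLE_DUT_MODE to enable or HCI_OP_RESET to disable it.
 */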
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

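/* The integer-valued entries below follow a common pattern: a get/set pair
 * that takes hci_dev_lock() around the access, wrapped by
 * DEFINE_SIMPLE_ATTRIBUTE(), which handles the u64 parsing and formatting
 * for debugfs.
 */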
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

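/* force_sc_support may only be toggled while the controller is down;
 * writes while HCI_UP is set are rejected with -EBUSY.
 */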
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

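/* idle_timeout is in milliseconds: 0 disables idle mode, any other value
 * must fall between 500 msec and 3600000 msec (one hour).
 */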
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

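/* Per the Bluetooth core specification, LE connection interval values are
 * expressed in units of 1.25 msec; the valid range 0x0006..0x0c80 checked
 * below therefore corresponds to 7.5 msec..4 sec.
 */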
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

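/* le_auto_conn accepts three write commands:
 *   "add <bdaddr> <addr_type> [auto_connect]"  - add connection parameters
 *   "del <bdaddr> <addr_type>"                 - remove them again
 *   "clr"                                      - clear all entries
 */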
static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	buf = memdup_user(data, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open		= le_auto_conn_open,
	.read		= seq_read,
	.write		= le_auto_conn_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

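/* Synchronous requests block the caller on hdev->req_wait_q. The two
 * helpers below complete or cancel a pending request by recording the
 * outcome in hdev->req_result and waking the waiter.
 */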
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

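/* Send a single HCI command and block until the matching event arrives
 * (Command Complete when no explicit event is given) or the timeout
 * expires. Returns the event skb on success or an ERR_PTR.
 */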
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

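/* Controller bring-up is staged: hci_init1_req resets the controller and
 * reads basic information, and the later init stages then issue their
 * commands conditionally, based on what the earlier stages reported.
 */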
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

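/* Pick the best supported inquiry mode: 0x02 for extended inquiry result,
 * 0x01 for inquiry result with RSSI, 0x00 for the standard format. A few
 * controllers that do not advertise the RSSI capability bit but handle it
 * fine are matched by manufacturer/revision and given mode 0x01 anyway.
 */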
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should be
		 * available as well. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled. To
		 * achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

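/* Run the full controller bring-up: init stage 1 applies to every
 * controller type, while stages 2-4 are only run for BR/EDR and LE
 * controllers. The debugfs entries are created once, during the
 * HCI_SETUP phase, and not on every power on.
 */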
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
				    &le_auto_conn_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

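/* The small request helpers below each wrap a single HCI command and
 * are driven through hci_req_sync() by the ioctl handlers further
 * down (HCISETSCAN, HCISETAUTH, HCISETENCRYPT and HCISETLINKPOL).
 */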
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

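/* A sketch of the canonical calling pattern (it mirrors the ioctl
 * helpers below); the reference taken by hci_dev_get() must always
 * be dropped with hci_dev_put(), including on error paths:
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */
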
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

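/* The inquiry cache keeps every discovered device on the "all" list
 * and additionally tracks devices with unresolved names on the
 * "unknown" and "resolve" lists. Flushing frees all entries and
 * resets the auxiliary lists.
 */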
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

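/* Re-insert an entry so the resolve list stays ordered by signal
 * strength: entries with a smaller RSSI magnitude (i.e. a stronger
 * signal) come first and get their names resolved first. Entries
 * whose name resolution is already pending keep their place at the
 * front of the list.
 */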
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

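/* Handler for the HCIINQUIRY ioctl: optionally (re)starts an inquiry,
 * waits for it to finish and then copies the cached results back to
 * user space.
 */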
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

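/* Power on the controller: open the transport, run the vendor setup
 * callback during HCI_SETUP and, unless the device is raw-only or in
 * use as a user channel, execute the full __hci_init() sequence.
 */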
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked for raw-only usage can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

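	/* For the two MTU ioctls below, dev_opt packs two 16-bit
	 * values: the second half-word in memory is taken as the MTU
	 * and the first half-word as the packet count.
	 */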
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

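/* Decide whether a new link key is worth storing persistently. The
 * decision is based on the key type and on the bonding requirements
 * both sides announced during authentication.
 */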
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302874static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002875 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002876{
2877 /* Legacy key */
2878 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302879 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002880
2881 /* Debug keys are insecure so don't store them persistently */
2882 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302883 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002884
2885 /* Changed combination key and there's no previous one */
2886 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302887 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002888
2889 /* Security mode 3 case */
2890 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302891 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002892
2893 /* Neither local nor remote side had no-bonding as requirement */
2894 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302895 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002896
2897 /* Local side had dedicated bonding as requirement */
2898 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302899 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002900
2901 /* Remote side had dedicated bonding as requirement */
2902 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302903 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002904
2905 /* If none of the above criteria match, then don't store the key
2906 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302907 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002908}
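
/* Worked example (illustrative, not from the original source): an
 * unauthenticated combination key (0x04) from a connection where both
 * sides requested dedicated bonding (auth_type 0x02 or 0x03) is kept
 * persistently, whereas a debug combination key (0x03) is always
 * discarded.
 */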
2909
Johan Hedberg98a0b842014-01-30 19:40:00 -08002910static bool ltk_type_master(u8 type)
2911{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03002912 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08002913}
2914
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002915struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002916 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002917{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002918 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002919
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002920 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002921 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002922 continue;
2923
Johan Hedberg98a0b842014-01-30 19:40:00 -08002924 if (ltk_type_master(k->type) != master)
2925 continue;
2926
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002927 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002928 }
2929
2930 return NULL;
2931}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002932
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002933struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002934 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002935{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002936 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002937
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002938 list_for_each_entry(k, &hdev->long_term_keys, list)
2939 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002940 bacmp(bdaddr, &k->bdaddr) == 0 &&
2941 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002942 return k;
2943
2944 return NULL;
2945}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002946
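/* Resolve an RPA to a stored IRK. The first pass looks for an exact
 * match against the last RPA cached for each IRK; only if that fails
 * is the AES-based resolution attempted, with the cached RPA updated
 * on a successful match.
 */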
Johan Hedberg970c4e42014-02-18 10:19:33 +02002947struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2948{
2949 struct smp_irk *irk;
2950
2951 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2952 if (!bacmp(&irk->rpa, rpa))
2953 return irk;
2954 }
2955
2956 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2957 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2958 bacpy(&irk->rpa, rpa);
2959 return irk;
2960 }
2961 }
2962
2963 return NULL;
2964}
2965
2966struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2967 u8 addr_type)
2968{
2969 struct smp_irk *irk;
2970
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002971 /* Identity Address must be public or static random */
2972 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2973 return NULL;
2974
Johan Hedberg970c4e42014-02-18 10:19:33 +02002975 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2976 if (addr_type == irk->addr_type &&
2977 bacmp(bdaddr, &irk->bdaddr) == 0)
2978 return irk;
2979 }
2980
2981 return NULL;
2982}
2983
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002984struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002985 bdaddr_t *bdaddr, u8 *val, u8 type,
2986 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002987{
2988 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302989 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002990
2991 old_key = hci_find_link_key(hdev, bdaddr);
2992 if (old_key) {
2993 old_key_type = old_key->type;
2994 key = old_key;
2995 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002996 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002997 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002998 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002999 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003000 list_add(&key->list, &hdev->link_keys);
3001 }
3002
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003003 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003004
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003005 /* Some buggy controller combinations generate a changed
3006 * combination key for legacy pairing even when there's no
3007 * previous key */
3008 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003009 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003010 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003011 if (conn)
3012 conn->key_type = type;
3013 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003014
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003015 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003016 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003017 key->pin_len = pin_len;
3018
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003019 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003020 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003021 else
3022 key->type = type;
3023
Johan Hedberg7652ff62014-06-24 13:15:49 +03003024 if (persistent)
3025 *persistent = hci_persistent_key(hdev, conn, type,
3026 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003027
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003028 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003029}
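
/* Usage sketch (hypothetical caller; the names mirror a parsed HCI
 * Link Key Notification event rather than any specific call site):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key && persistent)
 *		mgmt_new_link_key(hdev, key, persistent);
 */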
3030
Johan Hedbergca9142b2014-02-19 14:57:44 +02003031struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003032 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003033 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003034{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003035 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003036 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003037
Johan Hedberg98a0b842014-01-30 19:40:00 -08003038 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003039 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003040 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003041 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003042 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003043 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003044 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003045 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003046 }
3047
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003048 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003049 key->bdaddr_type = addr_type;
3050 memcpy(key->val, tk, sizeof(key->val));
3051 key->authenticated = authenticated;
3052 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003053 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003054 key->enc_size = enc_size;
3055 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003056
Johan Hedbergca9142b2014-02-19 14:57:44 +02003057 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003058}
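
/* Illustrative call (hypothetical values; the real callers live in
 * the SMP key distribution code): store a slave LTK received over
 * SMP:
 *
 *	hci_add_ltk(hdev, &conn->dst, conn->dst_type, SMP_LTK_SLAVE,
 *		    authenticated, tk, enc_size, ediv, rand);
 */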
3059
Johan Hedbergca9142b2014-02-19 14:57:44 +02003060struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3061 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003062{
3063 struct smp_irk *irk;
3064
3065 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3066 if (!irk) {
3067 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3068 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003069 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003070
3071 bacpy(&irk->bdaddr, bdaddr);
3072 irk->addr_type = addr_type;
3073
3074 list_add(&irk->list, &hdev->identity_resolving_keys);
3075 }
3076
3077 memcpy(irk->val, val, 16);
3078 bacpy(&irk->rpa, rpa);
3079
Johan Hedbergca9142b2014-02-19 14:57:44 +02003080 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003081}
3082
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003083int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3084{
3085 struct link_key *key;
3086
3087 key = hci_find_link_key(hdev, bdaddr);
3088 if (!key)
3089 return -ENOENT;
3090
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003091 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003092
3093 list_del(&key->list);
3094 kfree(key);
3095
3096 return 0;
3097}
3098
Johan Hedberge0b2b272014-02-18 17:14:31 +02003099int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003100{
3101 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003102 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003103
3104 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003105 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003106 continue;
3107
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003108 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003109
3110 list_del(&k->list);
3111 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003112 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003113 }
3114
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003115 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003116}
3117
Johan Hedberga7ec7332014-02-18 17:14:35 +02003118void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3119{
3120 struct smp_irk *k, *tmp;
3121
Johan Hedberg668b7b12014-02-21 16:03:31 +02003122 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003123 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3124 continue;
3125
3126 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3127
3128 list_del(&k->list);
3129 kfree(k);
3130 }
3131}
3132
Ville Tervo6bd32322011-02-16 16:32:41 +02003133/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003134static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003135{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003136 struct hci_dev *hdev = container_of(work, struct hci_dev,
3137 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003138
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003139 if (hdev->sent_cmd) {
3140 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3141 u16 opcode = __le16_to_cpu(sent->opcode);
3142
3143 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3144 } else {
3145 BT_ERR("%s command tx timeout", hdev->name);
3146 }
3147
Ville Tervo6bd32322011-02-16 16:32:41 +02003148 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003149 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003150}
3151
Szymon Janc2763eda2011-03-22 13:12:22 +01003152struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003153 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003154{
3155 struct oob_data *data;
3156
3157 list_for_each_entry(data, &hdev->remote_oob_data, list)
3158 if (bacmp(bdaddr, &data->bdaddr) == 0)
3159 return data;
3160
3161 return NULL;
3162}
3163
3164int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3165{
3166 struct oob_data *data;
3167
3168 data = hci_find_remote_oob_data(hdev, bdaddr);
3169 if (!data)
3170 return -ENOENT;
3171
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003172 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003173
3174 list_del(&data->list);
3175 kfree(data);
3176
3177 return 0;
3178}
3179
Johan Hedberg35f74982014-02-18 17:14:32 +02003180void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003181{
3182 struct oob_data *data, *n;
3183
3184 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3185 list_del(&data->list);
3186 kfree(data);
3187 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003188}
3189
Marcel Holtmann07988722014-01-10 02:07:29 -08003190int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3191 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003192{
3193 struct oob_data *data;
3194
3195 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003196 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003197 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003198 if (!data)
3199 return -ENOMEM;
3200
3201 bacpy(&data->bdaddr, bdaddr);
3202 list_add(&data->list, &hdev->remote_oob_data);
3203 }
3204
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003205 memcpy(data->hash192, hash, sizeof(data->hash192));
3206 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003207
Marcel Holtmann07988722014-01-10 02:07:29 -08003208 memset(data->hash256, 0, sizeof(data->hash256));
3209 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3210
3211 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3212
3213 return 0;
3214}
3215
3216int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3217 u8 *hash192, u8 *randomizer192,
3218 u8 *hash256, u8 *randomizer256)
3219{
3220 struct oob_data *data;
3221
3222 data = hci_find_remote_oob_data(hdev, bdaddr);
3223 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003224 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003225 if (!data)
3226 return -ENOMEM;
3227
3228 bacpy(&data->bdaddr, bdaddr);
3229 list_add(&data->list, &hdev->remote_oob_data);
3230 }
3231
3232 memcpy(data->hash192, hash192, sizeof(data->hash192));
3233 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3234
3235 memcpy(data->hash256, hash256, sizeof(data->hash256));
3236 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3237
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003238 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003239
3240 return 0;
3241}
3242
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003243struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3244 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003245{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003246 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003247
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003248 list_for_each_entry(b, &hdev->blacklist, list) {
3249 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003250 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003251 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003252
3253 return NULL;
3254}
3255
Marcel Holtmannc9507492014-02-27 19:35:54 -08003256static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003257{
3258 struct list_head *p, *n;
3259
3260 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003261 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003262
3263 list_del(p);
3264 kfree(b);
3265 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003266}
3267
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003268int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003269{
3270 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003271
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003272 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003273 return -EBADF;
3274
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003275 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003276 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003277
3278 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003279 if (!entry)
3280 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003281
3282 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003283 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003284
3285 list_add(&entry->list, &hdev->blacklist);
3286
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003287 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003288}
3289
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003290int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003291{
3292 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003293
Johan Hedberg35f74982014-02-18 17:14:32 +02003294 if (!bacmp(bdaddr, BDADDR_ANY)) {
3295 hci_blacklist_clear(hdev);
3296 return 0;
3297 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003298
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003299 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003300 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003301 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003302
3303 list_del(&entry->list);
3304 kfree(entry);
3305
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003306 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003307}
3308
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003309struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3310 bdaddr_t *bdaddr, u8 type)
3311{
3312 struct bdaddr_list *b;
3313
3314 list_for_each_entry(b, &hdev->le_white_list, list) {
3315 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3316 return b;
3317 }
3318
3319 return NULL;
3320}
3321
3322void hci_white_list_clear(struct hci_dev *hdev)
3323{
3324 struct list_head *p, *n;
3325
3326 list_for_each_safe(p, n, &hdev->le_white_list) {
3327 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3328
3329 list_del(p);
3330 kfree(b);
3331 }
3332}
3333
3334int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3335{
3336 struct bdaddr_list *entry;
3337
3338 if (!bacmp(bdaddr, BDADDR_ANY))
3339 return -EBADF;
3340
3341 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3342 if (!entry)
3343 return -ENOMEM;
3344
3345 bacpy(&entry->bdaddr, bdaddr);
3346 entry->bdaddr_type = type;
3347
3348 list_add(&entry->list, &hdev->le_white_list);
3349
3350 return 0;
3351}
3352
3353int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3354{
3355 struct bdaddr_list *entry;
3356
3357 if (!bacmp(bdaddr, BDADDR_ANY))
3358 return -EBADF;
3359
3360 entry = hci_white_list_lookup(hdev, bdaddr, type);
3361 if (!entry)
3362 return -ENOENT;
3363
3364 list_del(&entry->list);
3365 kfree(entry);
3366
3367 return 0;
3368}
3369
Andre Guedes15819a72014-02-03 13:56:18 -03003370/* This function requires the caller holds hdev->lock */
3371struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3372 bdaddr_t *addr, u8 addr_type)
3373{
3374 struct hci_conn_params *params;
3375
3376 list_for_each_entry(params, &hdev->le_conn_params, list) {
3377 if (bacmp(&params->addr, addr) == 0 &&
3378 params->addr_type == addr_type) {
3379 return params;
3380 }
3381 }
3382
3383 return NULL;
3384}
3385
Andre Guedescef952c2014-02-26 20:21:49 -03003386static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3387{
3388 struct hci_conn *conn;
3389
3390 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3391 if (!conn)
3392 return false;
3393
3394 if (conn->dst_type != type)
3395 return false;
3396
3397 if (conn->state != BT_CONNECTED)
3398 return false;
3399
3400 return true;
3401}
3402
Andre Guedesa9b0a042014-02-26 20:21:52 -03003403static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3404{
3405 if (addr_type == ADDR_LE_DEV_PUBLIC)
3406 return true;
3407
3408	/* Check for static random address type */
3409 if ((addr->b[5] & 0xc0) == 0xc0)
3410 return true;
3411
3412 return false;
3413}
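
/* Example (illustrative): d6:12:34:56:78:9a has the two most
 * significant bits of b[5] set (0xd6 & 0xc0 == 0xc0) and so counts
 * as a static random identity address, while 52:... addresses
 * (0x52 & 0xc0 == 0x40) are resolvable private and are rejected.
 */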
3414
Andre Guedes15819a72014-02-03 13:56:18 -03003415/* This function requires the caller holds hdev->lock */
Marcel Holtmann4b109662014-06-29 13:41:49 +02003416struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3417 bdaddr_t *addr, u8 addr_type)
3418{
3419 struct bdaddr_list *entry;
3420
3421 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3422 if (bacmp(&entry->bdaddr, addr) == 0 &&
3423 entry->bdaddr_type == addr_type)
3424 return entry;
3425 }
3426
3427 return NULL;
3428}
3429
3430/* This function requires the caller holds hdev->lock */
3431void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3432{
3433 struct bdaddr_list *entry;
3434
3435 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3436 if (entry)
3437 goto done;
3438
3439 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3440 if (!entry) {
3441 BT_ERR("Out of memory");
3442 return;
3443 }
3444
3445 bacpy(&entry->bdaddr, addr);
3446 entry->bdaddr_type = addr_type;
3447
3448 list_add(&entry->list, &hdev->pend_le_conns);
3449
3450 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3451
3452done:
3453 hci_update_background_scan(hdev);
3454}
3455
3456/* This function requires the caller holds hdev->lock */
3457void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3458{
3459 struct bdaddr_list *entry;
3460
3461 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3462 if (!entry)
3463 goto done;
3464
3465 list_del(&entry->list);
3466 kfree(entry);
3467
3468 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3469
3470done:
3471 hci_update_background_scan(hdev);
3472}
3473
3474/* This function requires the caller holds hdev->lock */
3475void hci_pend_le_conns_clear(struct hci_dev *hdev)
3476{
3477 struct bdaddr_list *entry, *tmp;
3478
3479 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3480 list_del(&entry->list);
3481 kfree(entry);
3482 }
3483
3484 BT_DBG("All LE pending connections cleared");
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02003485
3486 hci_update_background_scan(hdev);
Marcel Holtmann4b109662014-06-29 13:41:49 +02003487}
3488
3489/* This function requires the caller holds hdev->lock */
Andre Guedesa9b0a042014-02-26 20:21:52 -03003490int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3491 u8 auto_connect, u16 conn_min_interval,
3492 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003493{
3494 struct hci_conn_params *params;
3495
Andre Guedesa9b0a042014-02-26 20:21:52 -03003496 if (!is_identity_address(addr, addr_type))
3497 return -EINVAL;
3498
Andre Guedes15819a72014-02-03 13:56:18 -03003499 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003500 if (params)
3501 goto update;
Andre Guedes15819a72014-02-03 13:56:18 -03003502
3503 params = kzalloc(sizeof(*params), GFP_KERNEL);
3504 if (!params) {
3505 BT_ERR("Out of memory");
Andre Guedesa9b0a042014-02-26 20:21:52 -03003506 return -ENOMEM;
Andre Guedes15819a72014-02-03 13:56:18 -03003507 }
3508
3509 bacpy(&params->addr, addr);
3510 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003511
3512 list_add(&params->list, &hdev->le_conn_params);
3513
3514update:
Andre Guedes15819a72014-02-03 13:56:18 -03003515 params->conn_min_interval = conn_min_interval;
3516 params->conn_max_interval = conn_max_interval;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003517 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003518
Andre Guedescef952c2014-02-26 20:21:49 -03003519 switch (auto_connect) {
3520 case HCI_AUTO_CONN_DISABLED:
3521 case HCI_AUTO_CONN_LINK_LOSS:
3522 hci_pend_le_conn_del(hdev, addr, addr_type);
3523 break;
3524 case HCI_AUTO_CONN_ALWAYS:
3525 if (!is_connected(hdev, addr, addr_type))
3526 hci_pend_le_conn_add(hdev, addr, addr_type);
3527 break;
3528 }
Andre Guedes15819a72014-02-03 13:56:18 -03003529
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003530 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3531 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3532 conn_min_interval, conn_max_interval);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003533
3534 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003535}
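
/* Illustrative call (hypothetical address): reconnect automatically
 * whenever the device starts advertising, using the controller
 * defaults for the connection interval:
 *
 *	hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *			    HCI_AUTO_CONN_ALWAYS,
 *			    hdev->le_conn_min_interval,
 *			    hdev->le_conn_max_interval);
 */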
3536
3537/* This function requires the caller holds hdev->lock */
3538void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3539{
3540 struct hci_conn_params *params;
3541
3542 params = hci_conn_params_lookup(hdev, addr, addr_type);
3543 if (!params)
3544 return;
3545
Andre Guedescef952c2014-02-26 20:21:49 -03003546 hci_pend_le_conn_del(hdev, addr, addr_type);
3547
Andre Guedes15819a72014-02-03 13:56:18 -03003548 list_del(&params->list);
3549 kfree(params);
3550
3551 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3552}
3553
3554/* This function requires the caller holds hdev->lock */
3555void hci_conn_params_clear(struct hci_dev *hdev)
3556{
3557 struct hci_conn_params *params, *tmp;
3558
3559 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3560 list_del(&params->list);
3561 kfree(params);
3562 }
3563
Marcel Holtmann1089b672014-06-29 13:41:50 +02003564 hci_pend_le_conns_clear(hdev);
3565
Andre Guedes15819a72014-02-03 13:56:18 -03003566 BT_DBG("All LE connection parameters were removed");
3567}
3568
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003569static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003570{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003571 if (status) {
3572 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003573
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003574 hci_dev_lock(hdev);
3575 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3576 hci_dev_unlock(hdev);
3577 return;
3578 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003579}
3580
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003581static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003582{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003583 /* General inquiry access code (GIAC) */
3584 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3585 struct hci_request req;
3586 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003587 int err;
3588
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003589 if (status) {
3590 BT_ERR("Failed to disable LE scanning: status %d", status);
3591 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003592 }
3593
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003594 switch (hdev->discovery.type) {
3595 case DISCOV_TYPE_LE:
3596 hci_dev_lock(hdev);
3597 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3598 hci_dev_unlock(hdev);
3599 break;
3600
3601 case DISCOV_TYPE_INTERLEAVED:
3602 hci_req_init(&req, hdev);
3603
3604 memset(&cp, 0, sizeof(cp));
3605 memcpy(&cp.lap, lap, sizeof(cp.lap));
3606 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3607 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3608
3609 hci_dev_lock(hdev);
3610
3611 hci_inquiry_cache_flush(hdev);
3612
3613 err = hci_req_run(&req, inquiry_complete);
3614 if (err) {
3615 BT_ERR("Inquiry request failed: err %d", err);
3616 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3617 }
3618
3619 hci_dev_unlock(hdev);
3620 break;
3621 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003622}
3623
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003624static void le_scan_disable_work(struct work_struct *work)
3625{
3626 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003627 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003628 struct hci_request req;
3629 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003630
3631 BT_DBG("%s", hdev->name);
3632
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003633 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003634
Andre Guedesb1efcc22014-02-26 20:21:40 -03003635 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003636
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003637 err = hci_req_run(&req, le_scan_disable_work_complete);
3638 if (err)
3639 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003640}
3641
Johan Hedberg8d972502014-02-28 12:54:14 +02003642static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3643{
3644 struct hci_dev *hdev = req->hdev;
3645
3646 /* If we're advertising or initiating an LE connection we can't
3647 * go ahead and change the random address at this time. This is
3648 * because the eventual initiator address used for the
3649 * subsequently created connection will be undefined (some
3650 * controllers use the new address and others the one we had
3651 * when the operation started).
3652 *
3653 * In this kind of scenario skip the update and let the random
3654 * address be updated at the next cycle.
3655 */
3656 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3657 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3658 BT_DBG("Deferring random address update");
3659 return;
3660 }
3661
3662 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3663}
3664
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003665int hci_update_random_address(struct hci_request *req, bool require_privacy,
3666 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003667{
3668 struct hci_dev *hdev = req->hdev;
3669 int err;
3670
3671	/* If privacy is enabled, use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003672	 * the current RPA has expired, or something other than the
3673	 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003674 */
3675 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003676 int to;
3677
3678 *own_addr_type = ADDR_LE_DEV_RANDOM;
3679
3680 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003681 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003682 return 0;
3683
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003684 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003685 if (err < 0) {
3686 BT_ERR("%s failed to generate new RPA", hdev->name);
3687 return err;
3688 }
3689
Johan Hedberg8d972502014-02-28 12:54:14 +02003690 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003691
3692 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3693 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3694
3695 return 0;
3696 }
3697
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003698	/* In case privacy is required but no resolvable private address
3699	 * is available, use an unresolvable private address. This is
3700	 * useful for active scanning and non-connectable advertising.
3701 */
3702 if (require_privacy) {
3703 bdaddr_t urpa;
3704
3705 get_random_bytes(&urpa, 6);
3706 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3707
3708 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003709 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003710 return 0;
3711 }
3712
Johan Hedbergebd3a742014-02-23 19:42:21 +02003713	/* If forcing static address is in use or there is no public
3714	 * address, use the static address as the random address (but
3715	 * skip the HCI command if the current random address is already
3716	 * the static one).
3717 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003718 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003719 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3720 *own_addr_type = ADDR_LE_DEV_RANDOM;
3721 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3722 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3723 &hdev->static_addr);
3724 return 0;
3725 }
3726
3727 /* Neither privacy nor static address is being used so use a
3728 * public address.
3729 */
3730 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3731
3732 return 0;
3733}
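
/* Typical use from a request builder (sketch; mirrors the pattern
 * expected of the LE scan and advertising setup code):
 *
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, false, &own_addr_type) < 0)
 *		return;
 *	cp.own_address_type = own_addr_type;
 */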
3734
Johan Hedberga1f4c312014-02-27 14:05:41 +02003735/* Copy the Identity Address of the controller.
3736 *
3737 * If the controller has a public BD_ADDR, then by default use that one.
3738 * If this is an LE-only controller without a public address, default to
3739 * the static random address.
3740 *
3741 * For debugging purposes it is possible to force controllers with a
3742 * public address to use the static random address instead.
3743 */
3744void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3745 u8 *bdaddr_type)
3746{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003747 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003748 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3749 bacpy(bdaddr, &hdev->static_addr);
3750 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3751 } else {
3752 bacpy(bdaddr, &hdev->bdaddr);
3753 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3754 }
3755}
3756
David Herrmann9be0dab2012-04-22 14:39:57 +02003757/* Alloc HCI device */
3758struct hci_dev *hci_alloc_dev(void)
3759{
3760 struct hci_dev *hdev;
3761
3762 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3763 if (!hdev)
3764 return NULL;
3765
David Herrmannb1b813d2012-04-22 14:39:58 +02003766 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3767 hdev->esco_type = (ESCO_HV1);
3768 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003769 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3770 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003771 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3772 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003773
David Herrmannb1b813d2012-04-22 14:39:58 +02003774 hdev->sniff_max_interval = 800;
3775 hdev->sniff_min_interval = 80;
3776
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003777 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003778 hdev->le_scan_interval = 0x0060;
3779 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003780 hdev->le_conn_min_interval = 0x0028;
3781 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003782
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003783 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003784 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003785 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3786 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003787
David Herrmannb1b813d2012-04-22 14:39:58 +02003788 mutex_init(&hdev->lock);
3789 mutex_init(&hdev->req_lock);
3790
3791 INIT_LIST_HEAD(&hdev->mgmt_pending);
3792 INIT_LIST_HEAD(&hdev->blacklist);
3793 INIT_LIST_HEAD(&hdev->uuids);
3794 INIT_LIST_HEAD(&hdev->link_keys);
3795 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003796 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003797 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003798 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003799 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003800 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003801 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003802
3803 INIT_WORK(&hdev->rx_work, hci_rx_work);
3804 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3805 INIT_WORK(&hdev->tx_work, hci_tx_work);
3806 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003807
David Herrmannb1b813d2012-04-22 14:39:58 +02003808 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3809 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3810 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3811
David Herrmannb1b813d2012-04-22 14:39:58 +02003812 skb_queue_head_init(&hdev->rx_q);
3813 skb_queue_head_init(&hdev->cmd_q);
3814 skb_queue_head_init(&hdev->raw_q);
3815
3816 init_waitqueue_head(&hdev->req_wait_q);
3817
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003818 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003819
David Herrmannb1b813d2012-04-22 14:39:58 +02003820 hci_init_sysfs(hdev);
3821 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003822
3823 return hdev;
3824}
3825EXPORT_SYMBOL(hci_alloc_dev);
3826
3827/* Free HCI device */
3828void hci_free_dev(struct hci_dev *hdev)
3829{
David Herrmann9be0dab2012-04-22 14:39:57 +02003830 /* will free via device release */
3831 put_device(&hdev->dev);
3832}
3833EXPORT_SYMBOL(hci_free_dev);
3834
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835/* Register HCI device */
3836int hci_register_dev(struct hci_dev *hdev)
3837{
David Herrmannb1b813d2012-04-22 14:39:58 +02003838 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839
David Herrmann010666a2012-01-07 15:47:07 +01003840 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003841 return -EINVAL;
3842
Mat Martineau08add512011-11-02 16:18:36 -07003843 /* Do not allow HCI_AMP devices to register at index 0,
3844 * so the index can be used as the AMP controller ID.
3845 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003846 switch (hdev->dev_type) {
3847 case HCI_BREDR:
3848 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3849 break;
3850 case HCI_AMP:
3851 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3852 break;
3853 default:
3854 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003856
Sasha Levin3df92b32012-05-27 22:36:56 +02003857 if (id < 0)
3858 return id;
3859
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860 sprintf(hdev->name, "hci%d", id);
3861 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003862
3863 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3864
Kees Cookd8537542013-07-03 15:04:57 -07003865 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3866 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003867 if (!hdev->workqueue) {
3868 error = -ENOMEM;
3869 goto err;
3870 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003871
Kees Cookd8537542013-07-03 15:04:57 -07003872 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3873 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003874 if (!hdev->req_workqueue) {
3875 destroy_workqueue(hdev->workqueue);
3876 error = -ENOMEM;
3877 goto err;
3878 }
3879
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003880 if (!IS_ERR_OR_NULL(bt_debugfs))
3881 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3882
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003883 dev_set_name(&hdev->dev, "%s", hdev->name);
3884
Johan Hedberg99780a72014-02-18 10:40:07 +02003885 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3886 CRYPTO_ALG_ASYNC);
3887 if (IS_ERR(hdev->tfm_aes)) {
3888 BT_ERR("Unable to create crypto context");
3889 error = PTR_ERR(hdev->tfm_aes);
3890 hdev->tfm_aes = NULL;
3891 goto err_wqueue;
3892 }
3893
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003894 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003895 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003896 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003898 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003899 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3900 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003901 if (hdev->rfkill) {
3902 if (rfkill_register(hdev->rfkill) < 0) {
3903 rfkill_destroy(hdev->rfkill);
3904 hdev->rfkill = NULL;
3905 }
3906 }
3907
Johan Hedberg5e130362013-09-13 08:58:17 +03003908 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3909 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3910
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003911 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003912 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003913
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003914 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003915 /* Assume BR/EDR support until proven otherwise (such as
3916	 * through reading supported features during init).
3917 */
3918 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3919 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003920
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003921 write_lock(&hci_dev_list_lock);
3922 list_add(&hdev->list, &hci_dev_list);
3923 write_unlock(&hci_dev_list_lock);
3924
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003925 /* Devices that are marked for raw-only usage need to set
3926	 * the HCI_RAW flag to indicate that only the user channel
3927	 * is supported.
3928 */
3929 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3930 set_bit(HCI_RAW, &hdev->flags);
3931
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003933 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934
Johan Hedberg19202572013-01-14 22:33:51 +02003935 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003936
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003938
Johan Hedberg99780a72014-02-18 10:40:07 +02003939err_tfm:
3940 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003941err_wqueue:
3942 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003943 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003944err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003945 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003946
David Herrmann33ca9542011-10-08 14:58:49 +02003947 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948}
3949EXPORT_SYMBOL(hci_register_dev);
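
/* Driver-side sketch (hypothetical transport driver probe; my_open,
 * my_close and my_send are the driver's own callbacks):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */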
3950
3951/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003952void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953{
Sasha Levin3df92b32012-05-27 22:36:56 +02003954 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003955
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003956 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957
Johan Hovold94324962012-03-15 14:48:41 +01003958 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3959
Sasha Levin3df92b32012-05-27 22:36:56 +02003960 id = hdev->id;
3961
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003962 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003964 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003965
3966 hci_dev_do_close(hdev);
3967
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303968 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003969 kfree_skb(hdev->reassembly[i]);
3970
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003971 cancel_work_sync(&hdev->power_on);
3972
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003973 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003974 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3975 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003976 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003977 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003978 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003979 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003980
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003981 /* mgmt_index_removed should take care of emptying the
3982 * pending list */
3983 BUG_ON(!list_empty(&hdev->mgmt_pending));
3984
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985 hci_notify(hdev, HCI_DEV_UNREG);
3986
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003987 if (hdev->rfkill) {
3988 rfkill_unregister(hdev->rfkill);
3989 rfkill_destroy(hdev->rfkill);
3990 }
3991
Johan Hedberg99780a72014-02-18 10:40:07 +02003992 if (hdev->tfm_aes)
3993 crypto_free_blkcipher(hdev->tfm_aes);
3994
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003995 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003996
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003997 debugfs_remove_recursive(hdev->debugfs);
3998
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003999 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004000 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004001
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004002 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004003 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004004 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004005 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004006 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004007 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004008 hci_remote_oob_data_clear(hdev);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004009 hci_white_list_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03004010 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004011 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004012
David Herrmanndc946bd2012-01-07 15:47:24 +01004013 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004014
4015 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016}
4017EXPORT_SYMBOL(hci_unregister_dev);
4018
4019/* Suspend HCI device */
4020int hci_suspend_dev(struct hci_dev *hdev)
4021{
4022 hci_notify(hdev, HCI_DEV_SUSPEND);
4023 return 0;
4024}
4025EXPORT_SYMBOL(hci_suspend_dev);
4026
4027/* Resume HCI device */
4028int hci_resume_dev(struct hci_dev *hdev)
4029{
4030 hci_notify(hdev, HCI_DEV_RESUME);
4031 return 0;
4032}
4033EXPORT_SYMBOL(hci_resume_dev);
4034
Marcel Holtmann76bca882009-11-18 00:40:39 +01004035/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004036int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004037{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004038 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004039 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004040 kfree_skb(skb);
4041 return -ENXIO;
4042 }
4043
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004044 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004045 bt_cb(skb)->incoming = 1;
4046
4047 /* Time stamp */
4048 __net_timestamp(skb);
4049
Marcel Holtmann76bca882009-11-18 00:40:39 +01004050 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004051 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004052
Marcel Holtmann76bca882009-11-18 00:40:39 +01004053 return 0;
4054}
4055EXPORT_SYMBOL(hci_recv_frame);
4056
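/* Reassemble an HCI frame that arrives from the transport in
 * arbitrary chunks. Returns the number of bytes of the current chunk
 * left unconsumed, or a negative error. Once a frame is complete it
 * is handed to hci_recv_frame() and the reassembly slot is reset.
 */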
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304057static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004058 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304059{
4060 int len = 0;
4061 int hlen = 0;
4062 int remain = count;
4063 struct sk_buff *skb;
4064 struct bt_skb_cb *scb;
4065
4066 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004067 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304068 return -EILSEQ;
4069
4070 skb = hdev->reassembly[index];
4071
4072 if (!skb) {
4073 switch (type) {
4074 case HCI_ACLDATA_PKT:
4075 len = HCI_MAX_FRAME_SIZE;
4076 hlen = HCI_ACL_HDR_SIZE;
4077 break;
4078 case HCI_EVENT_PKT:
4079 len = HCI_MAX_EVENT_SIZE;
4080 hlen = HCI_EVENT_HDR_SIZE;
4081 break;
4082 case HCI_SCODATA_PKT:
4083 len = HCI_MAX_SCO_SIZE;
4084 hlen = HCI_SCO_HDR_SIZE;
4085 break;
4086 }
4087
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004088 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304089 if (!skb)
4090 return -ENOMEM;
4091
4092 scb = (void *) skb->cb;
4093 scb->expect = hlen;
4094 scb->pkt_type = type;
4095
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304096 hdev->reassembly[index] = skb;
4097 }
4098
4099 while (count) {
4100 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004101 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304102
4103 memcpy(skb_put(skb, len), data, len);
4104
4105 count -= len;
4106 data += len;
4107 scb->expect -= len;
4108 remain = count;
4109
4110 switch (type) {
4111 case HCI_EVENT_PKT:
4112 if (skb->len == HCI_EVENT_HDR_SIZE) {
4113 struct hci_event_hdr *h = hci_event_hdr(skb);
4114 scb->expect = h->plen;
4115
4116 if (skb_tailroom(skb) < scb->expect) {
4117 kfree_skb(skb);
4118 hdev->reassembly[index] = NULL;
4119 return -ENOMEM;
4120 }
4121 }
4122 break;
4123
4124 case HCI_ACLDATA_PKT:
4125 if (skb->len == HCI_ACL_HDR_SIZE) {
4126 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4127 scb->expect = __le16_to_cpu(h->dlen);
4128
4129 if (skb_tailroom(skb) < scb->expect) {
4130 kfree_skb(skb);
4131 hdev->reassembly[index] = NULL;
4132 return -ENOMEM;
4133 }
4134 }
4135 break;
4136
4137 case HCI_SCODATA_PKT:
4138 if (skb->len == HCI_SCO_HDR_SIZE) {
4139 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4140 scb->expect = h->dlen;
4141
4142 if (skb_tailroom(skb) < scb->expect) {
4143 kfree_skb(skb);
4144 hdev->reassembly[index] = NULL;
4145 return -ENOMEM;
4146 }
4147 }
4148 break;
4149 }
4150
4151 if (scb->expect == 0) {
4152 /* Complete frame */
4153
4154 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004155 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304156
4157 hdev->reassembly[index] = NULL;
4158 return remain;
4159 }
4160 }
4161
4162 return remain;
4163}
4164
Marcel Holtmannef222012007-07-11 06:42:04 +02004165int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4166{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304167 int rem = 0;
4168
Marcel Holtmannef222012007-07-11 06:42:04 +02004169 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4170 return -EILSEQ;
4171
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004172 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004173 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304174 if (rem < 0)
4175 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004176
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304177 data += (count - rem);
4178 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004179 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004180
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304181 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004182}
4183EXPORT_SYMBOL(hci_recv_fragment);
4184
Suraj Sumangala99811512010-07-14 13:02:19 +05304185#define STREAM_REASSEMBLY 0
4186
4187int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4188{
4189 int type;
4190 int rem = 0;
4191
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004192 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304193 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4194
4195 if (!skb) {
4196 struct { char type; } *pkt;
4197
4198 /* Start of the frame */
4199 pkt = data;
4200 type = pkt->type;
4201
4202 data++;
4203 count--;
4204 } else
4205 type = bt_cb(skb)->pkt_type;
4206
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004207 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004208 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304209 if (rem < 0)
4210 return rem;
4211
4212 data += (count - rem);
4213 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004214 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304215
4216 return rem;
4217}
4218EXPORT_SYMBOL(hci_recv_stream_fragment);
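
/* Driver-side sketch (hypothetical UART-style driver; buf and len
 * come straight from the transport, with the packet type byte sent
 * in-band at the start of each frame):
 *
 *	ret = hci_recv_stream_fragment(hdev, buf, len);
 *	if (ret < 0)
 *		BT_ERR("Frame reassembly failed (%d)", ret);
 */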
4219
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220/* ---- Interface to upper protocols ---- */
4221
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222int hci_register_cb(struct hci_cb *cb)
4223{
4224 BT_DBG("%p name %s", cb, cb->name);
4225
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004226 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004228 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229
4230 return 0;
4231}
4232EXPORT_SYMBOL(hci_register_cb);
4233
4234int hci_unregister_cb(struct hci_cb *cb)
4235{
4236 BT_DBG("%p name %s", cb, cb->name);
4237
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004238 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004240 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241
4242 return 0;
4243}
4244EXPORT_SYMBOL(hci_unregister_cb);
4245
Marcel Holtmann51086992013-10-10 14:54:19 -07004246static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004248 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004250 /* Time stamp */
4251 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004253 /* Send copy to monitor */
4254 hci_send_to_monitor(hdev, skb);
4255
4256 if (atomic_read(&hdev->promisc)) {
4257 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004258 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259 }
4260
4261 /* Get rid of skb owner, prior to sending to the driver. */
4262 skb_orphan(skb);
4263
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004264 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004265 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266}
4267
Johan Hedberg3119ae92013-03-05 20:37:44 +02004268void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4269{
4270 skb_queue_head_init(&req->cmd_q);
4271 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004272 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004273}
4274
4275int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4276{
4277 struct hci_dev *hdev = req->hdev;
4278 struct sk_buff *skb;
4279 unsigned long flags;
4280
4281 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4282
	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
4286 if (req->err) {
4287 skb_queue_purge(&req->cmd_q);
4288 return req->err;
4289 }
4290
Johan Hedberg3119ae92013-03-05 20:37:44 +02004291 /* Do not allow empty requests */
4292 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004293 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004294
4295 skb = skb_peek_tail(&req->cmd_q);
4296 bt_cb(skb)->req.complete = complete;
4297
4298 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4299 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4300 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4301
4302 queue_work(hdev->workqueue, &hdev->cmd_work);
4303
4304 return 0;
4305}
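
/* Illustrative sketch: the usual life cycle of an asynchronous request
 * built with the helpers above. The function names here are hypothetical;
 * the command and parameter block mirror hci_req_add_le_scan_disable()
 * further down in this file.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_ERR("%s example request failed: status 0x%2.2x",
		       hdev->name, status);
}

static int example_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	/* Splices the queued commands onto hdev->cmd_q and schedules
	 * cmd_work; example_req_complete() runs once the last command
	 * in the request completes.
	 */
	return hci_req_run(&req, example_req_complete);
}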
4306
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004307static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004308 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309{
4310 int len = HCI_COMMAND_HDR_SIZE + plen;
4311 struct hci_command_hdr *hdr;
4312 struct sk_buff *skb;
4313
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004315 if (!skb)
4316 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004317
4318 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004319 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320 hdr->plen = plen;
4321
4322 if (plen)
4323 memcpy(skb_put(skb, plen), param, plen);
4324
4325 BT_DBG("skb len %d", skb->len);
4326
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004327 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004328
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004329 return skb;
4330}
4331
4332/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004333int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4334 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004335{
4336 struct sk_buff *skb;
4337
4338 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4339
4340 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4341 if (!skb) {
4342 BT_ERR("%s no memory for command", hdev->name);
4343 return -ENOMEM;
4344 }
4345
	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
4349 bt_cb(skb)->req.start = true;
4350
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004352 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353
4354 return 0;
4355}
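
/* Illustrative usage of hci_send_cmd(): HCI_OP_RESET carries no
 * parameters, so plen is 0 and param is NULL. The command is queued on
 * hdev->cmd_q and sent from hci_cmd_work().
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *	if (err)
 *		BT_ERR("%s failed to queue reset (%d)", hdev->name, err);
 */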
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356
Johan Hedberg71c76a12013-03-05 20:37:46 +02004357/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004358void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4359 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004360{
4361 struct hci_dev *hdev = req->hdev;
4362 struct sk_buff *skb;
4363
4364 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4365
	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
4369 if (req->err)
4370 return;
4371
Johan Hedberg71c76a12013-03-05 20:37:46 +02004372 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4373 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004374 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4375 hdev->name, opcode);
4376 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004377 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004378 }
4379
4380 if (skb_queue_empty(&req->cmd_q))
4381 bt_cb(skb)->req.start = true;
4382
Johan Hedberg02350a72013-04-03 21:50:29 +03004383 bt_cb(skb)->req.event = event;
4384
Johan Hedberg71c76a12013-03-05 20:37:46 +02004385 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004386}
4387
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004388void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4389 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004390{
4391 hci_req_add_ev(req, opcode, plen, param, 0);
4392}
4393
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004395void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396{
4397 struct hci_command_hdr *hdr;
4398
4399 if (!hdev->sent_cmd)
4400 return NULL;
4401
4402 hdr = (void *) hdev->sent_cmd->data;
4403
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004404 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405 return NULL;
4406
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004407 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408
4409 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4410}
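
/* Illustrative sketch: a command-complete handler can recover the
 * parameters of the command it finishes via hci_sent_cmd_data(); the
 * opcode below is only an example.
 *
 *	struct hci_cp_le_set_scan_enable *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
 *	if (!cp)
 *		return;
 *	BT_DBG("%s sent enable %u", hdev->name, cp->enable);
 */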
4411
4412/* Send ACL data */
4413static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4414{
4415 struct hci_acl_hdr *hdr;
4416 int len = skb->len;
4417
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004418 skb_push(skb, HCI_ACL_HDR_SIZE);
4419 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004420 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004421 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4422 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004423}
4424
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004425static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004426 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004427{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004428 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004429 struct hci_dev *hdev = conn->hdev;
4430 struct sk_buff *list;
4431
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004432 skb->len = skb_headlen(skb);
4433 skb->data_len = 0;
4434
4435 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004436
4437 switch (hdev->dev_type) {
4438 case HCI_BREDR:
4439 hci_add_acl_hdr(skb, conn->handle, flags);
4440 break;
4441 case HCI_AMP:
4442 hci_add_acl_hdr(skb, chan->handle, flags);
4443 break;
4444 default:
4445 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4446 return;
4447 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004448
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004449 list = skb_shinfo(skb)->frag_list;
4450 if (!list) {
		/* Non-fragmented */
4452 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4453
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004454 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455 } else {
4456 /* Fragmented */
4457 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4458
4459 skb_shinfo(skb)->frag_list = NULL;
4460
4461 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004462 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004464 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004465
4466 flags &= ~ACL_START;
4467 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468 do {
			skb = list;
			list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004470
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004471 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004472 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473
4474 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4475
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004476 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477 } while (list);
4478
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004479 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004480 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004481}
4482
4483void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4484{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004485 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004486
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004487 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004488
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004489 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004491 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493
4494/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004495void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496{
4497 struct hci_dev *hdev = conn->hdev;
4498 struct hci_sco_hdr hdr;
4499
4500 BT_DBG("%s len %d", hdev->name, skb->len);
4501
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004502 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004503 hdr.dlen = skb->len;
4504
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004505 skb_push(skb, HCI_SCO_HDR_SIZE);
4506 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004507 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004509 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004510
Linus Torvalds1da177e2005-04-16 15:20:36 -07004511 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004512 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514
4515/* ---- HCI TX task (outgoing data) ---- */
4516
4517/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004518static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4519 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520{
4521 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004522 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004523 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524
	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004527
4528 rcu_read_lock();
4529
4530 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004531 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004532 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004533
4534 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4535 continue;
4536
Linus Torvalds1da177e2005-04-16 15:20:36 -07004537 num++;
4538
4539 if (c->sent < min) {
4540 min = c->sent;
4541 conn = c;
4542 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004543
4544 if (hci_conn_num(hdev, type) == num)
4545 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546 }
4547
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004548 rcu_read_unlock();
4549
Linus Torvalds1da177e2005-04-16 15:20:36 -07004550 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004551 int cnt, q;
4552
4553 switch (conn->type) {
4554 case ACL_LINK:
4555 cnt = hdev->acl_cnt;
4556 break;
4557 case SCO_LINK:
4558 case ESCO_LINK:
4559 cnt = hdev->sco_cnt;
4560 break;
4561 case LE_LINK:
4562 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4563 break;
4564 default:
4565 cnt = 0;
4566 BT_ERR("Unknown link type");
4567 }
4568
4569 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570 *quote = q ? q : 1;
	} else {
		*quote = 0;
	}
4573
4574 BT_DBG("conn %p quote %d", conn, *quote);
4575 return conn;
4576}
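
/* Worked example of the quota computed above: with cnt = 8 free ACL
 * buffers shared by num = 3 connections that have queued data, the
 * least-busy connection gets q = 8 / 3 = 2 packets this round; a
 * quotient of 0 is rounded up to 1 so the scheduler always makes
 * progress.
 */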
4577
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004578static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579{
4580 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004581 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582
Ville Tervobae1f5d92011-02-10 22:38:53 -03004583 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004585 rcu_read_lock();
4586
Linus Torvalds1da177e2005-04-16 15:20:36 -07004587 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004588 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004589 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004590 BT_ERR("%s killing stalled connection %pMR",
4591 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004592 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593 }
4594 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004595
4596 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597}
4598
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004599static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4600 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004601{
4602 struct hci_conn_hash *h = &hdev->conn_hash;
4603 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004604 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004605 struct hci_conn *conn;
4606 int cnt, q, conn_num = 0;
4607
4608 BT_DBG("%s", hdev->name);
4609
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004610 rcu_read_lock();
4611
4612 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004613 struct hci_chan *tmp;
4614
4615 if (conn->type != type)
4616 continue;
4617
4618 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4619 continue;
4620
4621 conn_num++;
4622
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004623 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004624 struct sk_buff *skb;
4625
4626 if (skb_queue_empty(&tmp->data_q))
4627 continue;
4628
4629 skb = skb_peek(&tmp->data_q);
4630 if (skb->priority < cur_prio)
4631 continue;
4632
4633 if (skb->priority > cur_prio) {
4634 num = 0;
4635 min = ~0;
4636 cur_prio = skb->priority;
4637 }
4638
4639 num++;
4640
4641 if (conn->sent < min) {
4642 min = conn->sent;
4643 chan = tmp;
4644 }
4645 }
4646
4647 if (hci_conn_num(hdev, type) == conn_num)
4648 break;
4649 }
4650
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004651 rcu_read_unlock();
4652
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004653 if (!chan)
4654 return NULL;
4655
4656 switch (chan->conn->type) {
4657 case ACL_LINK:
4658 cnt = hdev->acl_cnt;
4659 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004660 case AMP_LINK:
4661 cnt = hdev->block_cnt;
4662 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004663 case SCO_LINK:
4664 case ESCO_LINK:
4665 cnt = hdev->sco_cnt;
4666 break;
4667 case LE_LINK:
4668 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4669 break;
4670 default:
4671 cnt = 0;
4672 BT_ERR("Unknown link type");
4673 }
4674
4675 q = cnt / num;
4676 *quote = q ? q : 1;
4677 BT_DBG("chan %p quote %d", chan, *quote);
4678 return chan;
4679}
4680
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004681static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4682{
4683 struct hci_conn_hash *h = &hdev->conn_hash;
4684 struct hci_conn *conn;
4685 int num = 0;
4686
4687 BT_DBG("%s", hdev->name);
4688
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004689 rcu_read_lock();
4690
4691 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004692 struct hci_chan *chan;
4693
4694 if (conn->type != type)
4695 continue;
4696
4697 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4698 continue;
4699
4700 num++;
4701
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004702 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004703 struct sk_buff *skb;
4704
4705 if (chan->sent) {
4706 chan->sent = 0;
4707 continue;
4708 }
4709
4710 if (skb_queue_empty(&chan->data_q))
4711 continue;
4712
4713 skb = skb_peek(&chan->data_q);
4714 if (skb->priority >= HCI_PRIO_MAX - 1)
4715 continue;
4716
4717 skb->priority = HCI_PRIO_MAX - 1;
4718
4719 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004720 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004721 }
4722
4723 if (hci_conn_num(hdev, type) == num)
4724 break;
4725 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004726
4727 rcu_read_unlock();
4728
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004729}
4730
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004731static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4732{
4733 /* Calculate count of blocks used by this packet */
4734 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4735}
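
/* Worked example: with hdev->block_len = 27 and an skb of 55 bytes
 * (4-byte ACL header + 51 bytes of payload), this packet occupies
 * DIV_ROUND_UP(51, 27) = 2 controller buffer blocks.
 */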
4736
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004737static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738{
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004739 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740 /* ACL tx timeout must be longer than maximum
4741 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004742 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004743 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004744 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004746}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004747
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004748static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004749{
4750 unsigned int cnt = hdev->acl_cnt;
4751 struct hci_chan *chan;
4752 struct sk_buff *skb;
4753 int quote;
4754
4755 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004756
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004757 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004758 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004759 u32 priority = (skb_peek(&chan->data_q))->priority;
4760 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004761 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004762 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004763
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004764 /* Stop if priority has changed */
4765 if (skb->priority < priority)
4766 break;
4767
4768 skb = skb_dequeue(&chan->data_q);
4769
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004770 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004771 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004772
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004773 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004774 hdev->acl_last_tx = jiffies;
4775
4776 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004777 chan->sent++;
4778 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004779 }
4780 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004781
4782 if (cnt != hdev->acl_cnt)
4783 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004784}
4785
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004786static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004787{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004788 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004789 struct hci_chan *chan;
4790 struct sk_buff *skb;
4791 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004792 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004793
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004794 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004795
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004796 BT_DBG("%s", hdev->name);
4797
4798 if (hdev->dev_type == HCI_AMP)
4799 type = AMP_LINK;
4800 else
4801 type = ACL_LINK;
4802
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004803 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004804 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004805 u32 priority = (skb_peek(&chan->data_q))->priority;
4806 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4807 int blocks;
4808
4809 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004810 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004811
4812 /* Stop if priority has changed */
4813 if (skb->priority < priority)
4814 break;
4815
4816 skb = skb_dequeue(&chan->data_q);
4817
4818 blocks = __get_blocks(hdev, skb);
4819 if (blocks > hdev->block_cnt)
4820 return;
4821
4822 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004823 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004824
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004825 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004826 hdev->acl_last_tx = jiffies;
4827
4828 hdev->block_cnt -= blocks;
4829 quote -= blocks;
4830
4831 chan->sent += blocks;
4832 chan->conn->sent += blocks;
4833 }
4834 }
4835
4836 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004837 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004838}
4839
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004840static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004841{
4842 BT_DBG("%s", hdev->name);
4843
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004844 /* No ACL link over BR/EDR controller */
4845 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4846 return;
4847
4848 /* No AMP link over AMP controller */
4849 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004850 return;
4851
4852 switch (hdev->flow_ctl_mode) {
4853 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4854 hci_sched_acl_pkt(hdev);
4855 break;
4856
4857 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4858 hci_sched_acl_blk(hdev);
4859 break;
4860 }
4861}
4862
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004864static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865{
4866 struct hci_conn *conn;
4867 struct sk_buff *skb;
4868 int quote;
4869
4870 BT_DBG("%s", hdev->name);
4871
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004872 if (!hci_conn_num(hdev, SCO_LINK))
4873 return;
4874
Linus Torvalds1da177e2005-04-16 15:20:36 -07004875 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4876 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4877 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004878 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879
4880 conn->sent++;
4881 if (conn->sent == ~0)
4882 conn->sent = 0;
4883 }
4884 }
4885}
4886
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004887static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004888{
4889 struct hci_conn *conn;
4890 struct sk_buff *skb;
4891 int quote;
4892
4893 BT_DBG("%s", hdev->name);
4894
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004895 if (!hci_conn_num(hdev, ESCO_LINK))
4896 return;
4897
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004898 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4899 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004900 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4901 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004902 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004903
4904 conn->sent++;
4905 if (conn->sent == ~0)
4906 conn->sent = 0;
4907 }
4908 }
4909}
4910
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004911static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004912{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004913 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004914 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004915 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004916
4917 BT_DBG("%s", hdev->name);
4918
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004919 if (!hci_conn_num(hdev, LE_LINK))
4920 return;
4921
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004922 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004923 /* LE tx timeout must be longer than maximum
4924 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004925 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004926 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004927 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004928 }
4929
4930 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004931 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004932 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004933 u32 priority = (skb_peek(&chan->data_q))->priority;
4934 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004935 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004936 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004937
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004938 /* Stop if priority has changed */
4939 if (skb->priority < priority)
4940 break;
4941
4942 skb = skb_dequeue(&chan->data_q);
4943
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004944 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004945 hdev->le_last_tx = jiffies;
4946
4947 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004948 chan->sent++;
4949 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004950 }
4951 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004952
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004953 if (hdev->le_pkts)
4954 hdev->le_cnt = cnt;
4955 else
4956 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004957
4958 if (cnt != tmp)
4959 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004960}
4961
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004962static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004963{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004964 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004965 struct sk_buff *skb;
4966
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004967 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004968 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969
Marcel Holtmann52de5992013-09-03 18:08:38 -07004970 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4971 /* Schedule queues and send stuff to HCI driver */
4972 hci_sched_acl(hdev);
4973 hci_sched_sco(hdev);
4974 hci_sched_esco(hdev);
4975 hci_sched_le(hdev);
4976 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004977
Linus Torvalds1da177e2005-04-16 15:20:36 -07004978 /* Send next queued raw (unknown type) packet */
4979 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004980 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004981}
4982
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004983/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984
4985/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004986static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987{
4988 struct hci_acl_hdr *hdr = (void *) skb->data;
4989 struct hci_conn *conn;
4990 __u16 handle, flags;
4991
4992 skb_pull(skb, HCI_ACL_HDR_SIZE);
4993
4994 handle = __le16_to_cpu(hdr->handle);
4995 flags = hci_flags(handle);
4996 handle = hci_handle(handle);
4997
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004998 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004999 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005000
5001 hdev->stat.acl_rx++;
5002
5003 hci_dev_lock(hdev);
5004 conn = hci_conn_hash_lookup_handle(hdev, handle);
5005 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005006
Linus Torvalds1da177e2005-04-16 15:20:36 -07005007 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005008 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005009
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005011 l2cap_recv_acldata(conn, skb, flags);
5012 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005013 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005014 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005015 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005016 }
5017
5018 kfree_skb(skb);
5019}
5020
5021/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005022static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023{
5024 struct hci_sco_hdr *hdr = (void *) skb->data;
5025 struct hci_conn *conn;
5026 __u16 handle;
5027
5028 skb_pull(skb, HCI_SCO_HDR_SIZE);
5029
5030 handle = __le16_to_cpu(hdr->handle);
5031
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005032 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033
5034 hdev->stat.sco_rx++;
5035
5036 hci_dev_lock(hdev);
5037 conn = hci_conn_hash_lookup_handle(hdev, handle);
5038 hci_dev_unlock(hdev);
5039
5040 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005042 sco_recv_scodata(conn, skb);
5043 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005044 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005045 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005046 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047 }
5048
5049 kfree_skb(skb);
5050}
5051
Johan Hedberg9238f362013-03-05 20:37:48 +02005052static bool hci_req_is_complete(struct hci_dev *hdev)
5053{
5054 struct sk_buff *skb;
5055
5056 skb = skb_peek(&hdev->cmd_q);
5057 if (!skb)
5058 return true;
5059
5060 return bt_cb(skb)->req.start;
5061}
5062
Johan Hedberg42c6b122013-03-05 20:37:49 +02005063static void hci_resend_last(struct hci_dev *hdev)
5064{
5065 struct hci_command_hdr *sent;
5066 struct sk_buff *skb;
5067 u16 opcode;
5068
5069 if (!hdev->sent_cmd)
5070 return;
5071
5072 sent = (void *) hdev->sent_cmd->data;
5073 opcode = __le16_to_cpu(sent->opcode);
5074 if (opcode == HCI_OP_RESET)
5075 return;
5076
5077 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5078 if (!skb)
5079 return;
5080
5081 skb_queue_head(&hdev->cmd_q, skb);
5082 queue_work(hdev->workqueue, &hdev->cmd_work);
5083}
5084
Johan Hedberg9238f362013-03-05 20:37:48 +02005085void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5086{
5087 hci_req_complete_t req_complete = NULL;
5088 struct sk_buff *skb;
5089 unsigned long flags;
5090
5091 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5092
Johan Hedberg42c6b122013-03-05 20:37:49 +02005093 /* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005095 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005096 if (!hci_sent_cmd_data(hdev, opcode)) {
5097 /* Some CSR based controllers generate a spontaneous
5098 * reset complete event during init and any pending
5099 * command will never be completed. In such a case we
5100 * need to resend whatever was the last sent
5101 * command.
5102 */
5103 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5104 hci_resend_last(hdev);
5105
Johan Hedberg9238f362013-03-05 20:37:48 +02005106 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005107 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005108
	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
5111 */
5112 if (!status && !hci_req_is_complete(hdev))
5113 return;
5114
5115 /* If this was the last command in a request the complete
5116 * callback would be found in hdev->sent_cmd instead of the
5117 * command queue (hdev->cmd_q).
5118 */
5119 if (hdev->sent_cmd) {
5120 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005121
5122 if (req_complete) {
5123 /* We must set the complete callback to NULL to
5124 * avoid calling the callback more than once if
5125 * this function gets called again.
5126 */
5127 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5128
Johan Hedberg9238f362013-03-05 20:37:48 +02005129 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005130 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005131 }
5132
5133 /* Remove all pending commands belonging to this request */
5134 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5135 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5136 if (bt_cb(skb)->req.start) {
5137 __skb_queue_head(&hdev->cmd_q, skb);
5138 break;
5139 }
5140
5141 req_complete = bt_cb(skb)->req.complete;
5142 kfree_skb(skb);
5143 }
5144 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5145
5146call_complete:
5147 if (req_complete)
5148 req_complete(hdev, status);
5149}
5150
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005151static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005153 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154 struct sk_buff *skb;
5155
5156 BT_DBG("%s", hdev->name);
5157
Linus Torvalds1da177e2005-04-16 15:20:36 -07005158 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005159 /* Send copy to monitor */
5160 hci_send_to_monitor(hdev, skb);
5161
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162 if (atomic_read(&hdev->promisc)) {
5163 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005164 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005165 }
5166
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005167 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168 kfree_skb(skb);
5169 continue;
5170 }
5171
5172 if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005174 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005175 case HCI_ACLDATA_PKT:
5176 case HCI_SCODATA_PKT:
5177 kfree_skb(skb);
5178 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005179 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180 }
5181
5182 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005183 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005185 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186 hci_event_packet(hdev, skb);
5187 break;
5188
5189 case HCI_ACLDATA_PKT:
5190 BT_DBG("%s ACL data packet", hdev->name);
5191 hci_acldata_packet(hdev, skb);
5192 break;
5193
5194 case HCI_SCODATA_PKT:
5195 BT_DBG("%s SCO data packet", hdev->name);
5196 hci_scodata_packet(hdev, skb);
5197 break;
5198
5199 default:
5200 kfree_skb(skb);
5201 break;
5202 }
5203 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005204}
5205
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005206static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005208 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005209 struct sk_buff *skb;
5210
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005211 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5212 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005215 if (atomic_read(&hdev->cmd_cnt)) {
5216 skb = skb_dequeue(&hdev->cmd_q);
5217 if (!skb)
5218 return;
5219
Wei Yongjun7585b972009-02-25 18:29:52 +08005220 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005221
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005222 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005223 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005224 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005225 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005226 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005227 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005228 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005229 schedule_delayed_work(&hdev->cmd_timer,
5230 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231 } else {
5232 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005233 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234 }
5235 }
5236}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005237
5238void hci_req_add_le_scan_disable(struct hci_request *req)
5239{
5240 struct hci_cp_le_set_scan_enable cp;
5241
5242 memset(&cp, 0, sizeof(cp));
5243 cp.enable = LE_SCAN_DISABLE;
5244 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5245}
Andre Guedesa4790db2014-02-26 20:21:47 -03005246
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005247void hci_req_add_le_passive_scan(struct hci_request *req)
5248{
5249 struct hci_cp_le_set_scan_param param_cp;
5250 struct hci_cp_le_set_scan_enable enable_cp;
5251 struct hci_dev *hdev = req->hdev;
5252 u8 own_addr_type;
5253
	/* Set require_privacy to false since no SCAN_REQ PDUs are sent
5255 * during passive scanning. Not using an unresolvable address
5256 * here is important so that peer devices using direct
5257 * advertising with our address will be correctly reported
5258 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005259 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005260 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005261 return;
5262
5263 memset(&param_cp, 0, sizeof(param_cp));
5264 param_cp.type = LE_SCAN_PASSIVE;
5265 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5266 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5267 param_cp.own_address_type = own_addr_type;
5268 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5269 &param_cp);
5270
5271 memset(&enable_cp, 0, sizeof(enable_cp));
5272 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005273 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005274 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5275 &enable_cp);
5276}
5277
Andre Guedesa4790db2014-02-26 20:21:47 -03005278static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5279{
5280 if (status)
5281 BT_DBG("HCI request failed to update background scanning: "
5282 "status 0x%2.2x", status);
5283}
5284
5285/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections, we start the background scanning;
5287 * otherwise we stop it.
5288 *
5289 * This function requires the caller holds hdev->lock.
5290 */
5291void hci_update_background_scan(struct hci_dev *hdev)
5292{
Andre Guedesa4790db2014-02-26 20:21:47 -03005293 struct hci_request req;
5294 struct hci_conn *conn;
5295 int err;
5296
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005297 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5298 return;
5299
Andre Guedesa4790db2014-02-26 20:21:47 -03005300 hci_req_init(&req, hdev);
5301
5302 if (list_empty(&hdev->pend_le_conns)) {
		/* If there are no pending LE connections, we should stop
5304 * the background scanning.
5305 */
5306
5307 /* If controller is not scanning we are done. */
5308 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5309 return;
5310
5311 hci_req_add_le_scan_disable(&req);
5312
5313 BT_DBG("%s stopping background scanning", hdev->name);
5314 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005315 /* If there is at least one pending LE connection, we should
5316 * keep the background scan running.
5317 */
5318
Andre Guedesa4790db2014-02-26 20:21:47 -03005319 /* If controller is connecting, we should not start scanning
5320 * since some controllers are not able to scan and connect at
5321 * the same time.
5322 */
5323 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5324 if (conn)
5325 return;
5326
Andre Guedes4340a122014-03-10 18:26:24 -03005327 /* If controller is currently scanning, we stop it to ensure we
5328 * don't miss any advertising (due to duplicates filter).
5329 */
5330 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5331 hci_req_add_le_scan_disable(&req);
5332
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005333 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005334
5335 BT_DBG("%s starting background scanning", hdev->name);
5336 }
5337
5338 err = hci_req_run(&req, update_background_scan_complete);
5339 if (err)
5340 BT_ERR("Failed to run HCI request: err %d", err);
5341}