/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

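/* The boolean debugfs attributes in this file share a common pattern:
 * reads return a single 'Y' or 'N' character, and writes are parsed
 * with strtobool() and rejected with -EALREADY when they would not
 * change the current state. dut_mode below additionally sends a command
 * to the controller before flipping the flag.
 */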
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

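/* Dump the inquiry cache: one line per discovered device with its
 * bdaddr, page scan repetition/period modes, page scan mode, class of
 * device, clock offset, RSSI, SSP mode and entry timestamp.
 */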
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

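/* Numeric attributes below are built with DEFINE_SIMPLE_ATTRIBUTE(),
 * which wires a get/set pair of u64 accessors into a file_operations
 * using the given printf format; attributes registered without a set
 * handler (such as voice_setting) end up effectively read-only.
 */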
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

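/* Sniff intervals are expressed in baseband slots of 0.625 ms, which is
 * why the setters below reject odd values and keep min <= max.
 */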
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

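/* conn_info_min_age/conn_info_max_age bound how long cached connection
 * information (such as RSSI) may be served before being refreshed from
 * the controller; the setters keep min <= max and both non-zero.
 */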
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

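/* Print the current identity: the identity address and its type as
 * resolved by hci_copy_identity_address(), followed by the local IRK
 * and the current resolvable private address (RPA).
 */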
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

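/* The LE connection interval attributes are in units of 1.25 ms; the
 * allowed range 0x0006-0x0c80 therefore corresponds to 7.5 ms - 4 s,
 * and the setters keep min <= max.
 */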
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

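/* Synchronous requests work by building an hci_request, marking
 * hdev->req_status as HCI_REQ_PEND and sleeping on hdev->req_wait_q
 * until hci_req_sync_complete() (success) or hci_req_cancel() (failure)
 * updates the status and wakes the waiter, or the timeout expires.
 */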
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

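/* Illustrative sketch (not part of the original file): a typical caller
 * of __hci_cmd_sync() issues one command while holding the request lock
 * and converts the status byte of the returned Command Complete event
 * into an errno, exactly as dut_mode_write() above does. HCI_OP_RESET
 * is used here only as a placeholder opcode.
 */
static int __maybe_unused hci_cmd_sync_example(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	int err;

	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* The first parameter of a Command Complete event is the status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	return err;
}
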
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

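/* bredr_init() and amp_init() below queue the stage-one initialization
 * commands for BR/EDR and AMP controllers respectively; hci_init1_req()
 * picks between them based on hdev->dev_type.
 */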
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

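/* Pick the best supported inquiry mode: 0x02 (extended inquiry result),
 * 0x01 (inquiry result with RSSI) or 0x00 (standard). The explicit
 * manufacturer/revision checks cover controllers that support RSSI
 * results without advertising the corresponding feature bit.
 */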
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

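/* Build the payload for HCI_OP_SET_EVENT_MASK from the feature bits the
 * controller reported, so that only events the host can actually handle
 * on this controller are enabled.
 */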
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

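/* Stage two of the init sequence: configure BR/EDR and/or LE basics
 * depending on what the controller advertised during stage one.
 */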
Johan Hedberg42c6b122013-03-05 20:37:49 +02001513static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001514{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001515 struct hci_dev *hdev = req->hdev;
1516
Johan Hedberg2177bab2013-03-05 20:37:43 +02001517 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001518 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001519 else
1520 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001521
1522 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001523 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001524
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001525 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1526 * local supported commands HCI command.
1527 */
1528 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001529 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001530
1531 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001532 /* When SSP is available, then the host features page
1533 * should also be available as well. However some
1534 * controllers list the max_page as 0 as long as SSP
1535 * has not been enabled. To achieve proper debugging
1536 * output, force the minimum max_page to 1 at least.
1537 */
1538 hdev->max_page = 0x01;
1539
Johan Hedberg2177bab2013-03-05 20:37:43 +02001540 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1541 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1543 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001544 } else {
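			/* SSP is not enabled, so clear any previously
			 * configured Extended Inquiry Response data by
			 * writing an all-zero EIR parameter.
			 */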
1545 struct hci_cp_write_eir cp;
1546
1547 memset(hdev->eir, 0, sizeof(hdev->eir));
1548 memset(&cp, 0, sizeof(cp));
1549
Johan Hedberg42c6b122013-03-05 20:37:49 +02001550 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001551 }
1552 }
1553
1554 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001555 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001556
1557 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001558 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001559
1560 if (lmp_ext_feat_capable(hdev)) {
1561 struct hci_cp_read_local_ext_features cp;
1562
1563 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001564 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1565 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001566 }
1567
1568 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1569 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001570 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1571 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001572 }
1573}
1574
Johan Hedberg42c6b122013-03-05 20:37:49 +02001575static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001576{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001577 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001578 struct hci_cp_write_def_link_policy cp;
1579 u16 link_policy = 0;
1580
1581 if (lmp_rswitch_capable(hdev))
1582 link_policy |= HCI_LP_RSWITCH;
1583 if (lmp_hold_capable(hdev))
1584 link_policy |= HCI_LP_HOLD;
1585 if (lmp_sniff_capable(hdev))
1586 link_policy |= HCI_LP_SNIFF;
1587 if (lmp_park_capable(hdev))
1588 link_policy |= HCI_LP_PARK;
1589
1590 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001591 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001592}
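/* For reference, the HCI_LP_* constants combined above are single-bit
 * flags from hci.h: HCI_LP_RSWITCH (0x0001), HCI_LP_HOLD (0x0002),
 * HCI_LP_SNIFF (0x0004) and HCI_LP_PARK (0x0008). A controller that
 * supports role switch and sniff mode therefore ends up with a default
 * link policy of cpu_to_le16(0x0005).
 */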
1593
Johan Hedberg42c6b122013-03-05 20:37:49 +02001594static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001595{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001596 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001597 struct hci_cp_write_le_host_supported cp;
1598
Johan Hedbergc73eee92013-04-19 18:35:21 +03001599 /* LE-only devices do not support explicit enablement */
1600 if (!lmp_bredr_capable(hdev))
1601 return;
1602
Johan Hedberg2177bab2013-03-05 20:37:43 +02001603 memset(&cp, 0, sizeof(cp));
1604
1605 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
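		/* Enable LE, but deliberately leave the simultaneous
		 * LE and BR/EDR flag cleared; simultaneous operation to
		 * the same remote device is not advertised by the host.
		 */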
1606 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001607 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001608 }
1609
1610 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001611 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1612 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001613}
1614
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001615static void hci_set_event_mask_page_2(struct hci_request *req)
1616{
1617 struct hci_dev *hdev = req->hdev;
1618 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1619
 1620 /* If the Connectionless Slave Broadcast master role is supported,
1621 * enable all necessary events for it.
1622 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001623 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001624 events[1] |= 0x40; /* Triggered Clock Capture */
1625 events[1] |= 0x80; /* Synchronization Train Complete */
1626 events[2] |= 0x10; /* Slave Page Response Timeout */
1627 events[2] |= 0x20; /* CSB Channel Map Change */
1628 }
1629
 1630 /* If the Connectionless Slave Broadcast slave role is supported,
1631 * enable all necessary events for it.
1632 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001633 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001634 events[2] |= 0x01; /* Synchronization Train Received */
1635 events[2] |= 0x02; /* CSB Receive */
1636 events[2] |= 0x04; /* CSB Timeout */
1637 events[2] |= 0x08; /* Truncated Page Complete */
1638 }
1639
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001640 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001641 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001642 events[2] |= 0x80;
1643
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001644 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1645}
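/* Page 2 uses the same bit layout as page 1: event bit N maps to
 * events[N / 8], bit position N % 8. Authenticated Payload Timeout
 * Expired is bit 23 on this page, which is why the code above sets
 * events[2] |= 0x80 (a sketch based on the Core Specification
 * numbering).
 */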
1646
Johan Hedberg42c6b122013-03-05 20:37:49 +02001647static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001648{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001649 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001650 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001651
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001652 hci_setup_event_mask(req);
1653
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001654 /* Some Broadcom based Bluetooth controllers do not support the
1655 * Delete Stored Link Key command. They are clearly indicating its
1656 * absence in the bit mask of supported commands.
1657 *
 1658 * Check the supported commands and send it only if the command is
 1659 * marked as supported. If it is not supported, assume that the
 1660 * controller does not have actual support for stored link keys,
 1661 * which makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001662 *
 1663 * Some controllers indicate that they support handling the Delete
 1664 * Stored Link Key command, but they don't. The quirk lets a driver
 1665 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001666 */
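	/* Octet 6, bit 7 of the supported commands bitmask corresponds
	 * to the Delete Stored Link Key command (mapping taken from the
	 * Core Specification supported commands table).
	 */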
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001667 if (hdev->commands[6] & 0x80 &&
1668 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001669 struct hci_cp_delete_stored_link_key cp;
1670
1671 bacpy(&cp.bdaddr, BDADDR_ANY);
1672 cp.delete_all = 0x01;
1673 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1674 sizeof(cp), &cp);
1675 }
1676
Johan Hedberg2177bab2013-03-05 20:37:43 +02001677 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001678 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001679
Andre Guedes9193c6e2014-07-01 18:10:09 -03001680 if (lmp_le_capable(hdev)) {
1681 u8 events[8];
1682
1683 memset(events, 0, sizeof(events));
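		/* 0x0f enables the four default LE meta events: LE
		 * Connection Complete, LE Advertising Report, LE
		 * Connection Update Complete and LE Read Remote Used
		 * Features Complete (bits 0-3 of the LE event mask).
		 */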
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001684 events[0] = 0x0f;
1685
1686 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1687 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001688
1689 /* If controller supports the Connection Parameters Request
1690 * Link Layer Procedure, enable the corresponding event.
1691 */
1692 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1693 events[0] |= 0x20; /* LE Remote Connection
1694 * Parameter Request
1695 */
1696
Andre Guedes9193c6e2014-07-01 18:10:09 -03001697 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1698 events);
1699
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001700 if (hdev->commands[25] & 0x40) {
1701 /* Read LE Advertising Channel TX Power */
1702 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1703 }
1704
Johan Hedberg42c6b122013-03-05 20:37:49 +02001705 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001706 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001707
1708 /* Read features beyond page 1 if available */
1709 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1710 struct hci_cp_read_local_ext_features cp;
1711
1712 cp.page = p;
1713 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1714 sizeof(cp), &cp);
1715 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001716}
1717
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001718static void hci_init4_req(struct hci_request *req, unsigned long opt)
1719{
1720 struct hci_dev *hdev = req->hdev;
1721
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001722 /* Set event mask page 2 if the HCI command for it is supported */
1723 if (hdev->commands[22] & 0x04)
1724 hci_set_event_mask_page_2(req);
1725
Marcel Holtmann109e3192014-07-23 19:24:56 +02001726 /* Read local codec list if the HCI command is supported */
1727 if (hdev->commands[29] & 0x20)
1728 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1729
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001730 /* Get MWS transport configuration if the HCI command is supported */
1731 if (hdev->commands[30] & 0x08)
1732 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1733
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001734 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001735 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001736 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001737
1738 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001739 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001740 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001741 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1742 u8 support = 0x01;
1743 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1744 sizeof(support), &support);
1745 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001746}
1747
Johan Hedberg2177bab2013-03-05 20:37:43 +02001748static int __hci_init(struct hci_dev *hdev)
1749{
1750 int err;
1751
1752 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1753 if (err < 0)
1754 return err;
1755
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001756 /* The Device Under Test (DUT) mode is special and available for
1757 * all controller types. So just create it early on.
1758 */
1759 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1760 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1761 &dut_mode_fops);
1762 }
1763
Johan Hedberg2177bab2013-03-05 20:37:43 +02001764 /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
 1765 * dual-mode BR/EDR/LE controllers. AMP controllers only need
 1766 * the first stage init.
1767 */
1768 if (hdev->dev_type != HCI_BREDR)
1769 return 0;
1770
1771 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1772 if (err < 0)
1773 return err;
1774
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001775 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1776 if (err < 0)
1777 return err;
1778
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001779 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1780 if (err < 0)
1781 return err;
1782
1783 /* Only create debugfs entries during the initial setup
1784 * phase and not every time the controller gets powered on.
1785 */
1786 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1787 return 0;
1788
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001789 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1790 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001791 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1792 &hdev->manufacturer);
1793 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1794 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001795 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1796 &device_list_fops);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001797 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1798 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001799 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1800
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001801 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1802 &conn_info_min_age_fops);
1803 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1804 &conn_info_max_age_fops);
1805
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001806 if (lmp_bredr_capable(hdev)) {
1807 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1808 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001809 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1810 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001811 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1812 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001813 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1814 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001815 }
1816
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001817 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001818 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1819 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001820 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1821 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001822 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1823 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001824 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001825
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001826 if (lmp_sniff_capable(hdev)) {
1827 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1828 hdev, &idle_timeout_fops);
1829 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1830 hdev, &sniff_min_interval_fops);
1831 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1832 hdev, &sniff_max_interval_fops);
1833 }
1834
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001835 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001836 debugfs_create_file("identity", 0400, hdev->debugfs,
1837 hdev, &identity_fops);
1838 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1839 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001840 debugfs_create_file("random_address", 0444, hdev->debugfs,
1841 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001842 debugfs_create_file("static_address", 0444, hdev->debugfs,
1843 hdev, &static_address_fops);
1844
1845 /* For controllers with a public address, provide a debug
1846 * option to force the usage of the configured static
1847 * address. By default the public address is used.
1848 */
1849 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1850 debugfs_create_file("force_static_address", 0644,
1851 hdev->debugfs, hdev,
1852 &force_static_address_fops);
1853
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001854 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1855 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001856 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1857 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001858 debugfs_create_file("identity_resolving_keys", 0400,
1859 hdev->debugfs, hdev,
1860 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001861 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1862 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001863 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1864 hdev, &conn_min_interval_fops);
1865 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1866 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001867 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1868 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001869 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1870 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001871 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1872 hdev, &adv_channel_map_fops);
Georg Lukas729a1052014-07-26 13:59:58 +02001873 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1874 hdev, &adv_min_interval_fops);
1875 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1876 hdev, &adv_max_interval_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001877 debugfs_create_u16("discov_interleaved_timeout", 0644,
1878 hdev->debugfs,
1879 &hdev->discov_interleaved_timeout);
Johan Hedberg54506912014-08-08 09:32:51 +03001880
Johan Hedberg711eafe2014-08-08 09:32:52 +03001881 smp_register(hdev);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001882 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001883
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001884 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001885}
1886
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001887static void hci_init0_req(struct hci_request *req, unsigned long opt)
1888{
1889 struct hci_dev *hdev = req->hdev;
1890
1891 BT_DBG("%s %ld", hdev->name, opt);
1892
1893 /* Reset */
1894 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1895 hci_reset_req(req, 0);
1896
1897 /* Read Local Version */
1898 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1899
1900 /* Read BD Address */
1901 if (hdev->set_bdaddr)
1902 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1903}
1904
1905static int __hci_unconf_init(struct hci_dev *hdev)
1906{
1907 int err;
1908
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001909 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1910 return 0;
1911
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001912 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1913 if (err < 0)
1914 return err;
1915
1916 return 0;
1917}
1918
Johan Hedberg42c6b122013-03-05 20:37:49 +02001919static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920{
1921 __u8 scan = opt;
1922
Johan Hedberg42c6b122013-03-05 20:37:49 +02001923 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924
1925 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001926 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927}
1928
Johan Hedberg42c6b122013-03-05 20:37:49 +02001929static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930{
1931 __u8 auth = opt;
1932
Johan Hedberg42c6b122013-03-05 20:37:49 +02001933 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
1935 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001936 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937}
1938
Johan Hedberg42c6b122013-03-05 20:37:49 +02001939static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940{
1941 __u8 encrypt = opt;
1942
Johan Hedberg42c6b122013-03-05 20:37:49 +02001943 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001945 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001946 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947}
1948
Johan Hedberg42c6b122013-03-05 20:37:49 +02001949static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001950{
1951 __le16 policy = cpu_to_le16(opt);
1952
Johan Hedberg42c6b122013-03-05 20:37:49 +02001953 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001954
1955 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001956 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001957}
1958
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001959/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 * Device is held on return. */
1961struct hci_dev *hci_dev_get(int index)
1962{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001963 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964
1965 BT_DBG("%d", index);
1966
1967 if (index < 0)
1968 return NULL;
1969
1970 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001971 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 if (d->id == index) {
1973 hdev = hci_dev_hold(d);
1974 break;
1975 }
1976 }
1977 read_unlock(&hci_dev_list_lock);
1978 return hdev;
1979}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
1981/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001982
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001983bool hci_discovery_active(struct hci_dev *hdev)
1984{
1985 struct discovery_state *discov = &hdev->discovery;
1986
Andre Guedes6fbe1952012-02-03 17:47:58 -03001987 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001988 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001989 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001990 return true;
1991
Andre Guedes6fbe1952012-02-03 17:47:58 -03001992 default:
1993 return false;
1994 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001995}
1996
Johan Hedbergff9ef572012-01-04 14:23:45 +02001997void hci_discovery_set_state(struct hci_dev *hdev, int state)
1998{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001999 int old_state = hdev->discovery.state;
2000
Johan Hedbergff9ef572012-01-04 14:23:45 +02002001 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2002
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002003 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002004 return;
2005
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002006 hdev->discovery.state = state;
2007
Johan Hedbergff9ef572012-01-04 14:23:45 +02002008 switch (state) {
2009 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002010 hci_update_background_scan(hdev);
2011
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002012 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002013 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002014 break;
2015 case DISCOVERY_STARTING:
2016 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002017 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002018 mgmt_discovering(hdev, 1);
2019 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002020 case DISCOVERY_RESOLVING:
2021 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002022 case DISCOVERY_STOPPING:
2023 break;
2024 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002025}
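/* A summary of the transitions handled above (inferred from the switch
 * statement): STOPPED -> STARTING -> FINDING, optionally -> RESOLVING
 * while remote names are looked up, then -> STOPPING -> STOPPED. Only
 * entering FINDING and returning to STOPPED are reported to userspace
 * via mgmt_discovering().
 */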
2026
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002027void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028{
Johan Hedberg30883512012-01-04 14:16:21 +02002029 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002030 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031
Johan Hedberg561aafb2012-01-04 13:31:59 +02002032 list_for_each_entry_safe(p, n, &cache->all, all) {
2033 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002034 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002036
2037 INIT_LIST_HEAD(&cache->unknown);
2038 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039}
2040
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002041struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2042 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043{
Johan Hedberg30883512012-01-04 14:16:21 +02002044 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 struct inquiry_entry *e;
2046
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002047 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048
Johan Hedberg561aafb2012-01-04 13:31:59 +02002049 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002051 return e;
2052 }
2053
2054 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055}
2056
Johan Hedberg561aafb2012-01-04 13:31:59 +02002057struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002058 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002059{
Johan Hedberg30883512012-01-04 14:16:21 +02002060 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002061 struct inquiry_entry *e;
2062
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002063 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002064
2065 list_for_each_entry(e, &cache->unknown, list) {
2066 if (!bacmp(&e->data.bdaddr, bdaddr))
2067 return e;
2068 }
2069
2070 return NULL;
2071}
2072
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002073struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002074 bdaddr_t *bdaddr,
2075 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002076{
2077 struct discovery_state *cache = &hdev->discovery;
2078 struct inquiry_entry *e;
2079
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002080 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002081
2082 list_for_each_entry(e, &cache->resolve, list) {
2083 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2084 return e;
2085 if (!bacmp(&e->data.bdaddr, bdaddr))
2086 return e;
2087 }
2088
2089 return NULL;
2090}
2091
Johan Hedberga3d4e202012-01-09 00:53:02 +02002092void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002093 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002094{
2095 struct discovery_state *cache = &hdev->discovery;
2096 struct list_head *pos = &cache->resolve;
2097 struct inquiry_entry *p;
2098
2099 list_del(&ie->list);
2100
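	/* Keep the resolve list ordered by signal strength: entries with
	 * a smaller abs(rssi), i.e. a stronger signal, sit closer to the
	 * head so that their names get resolved first.
	 */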
2101 list_for_each_entry(p, &cache->resolve, list) {
2102 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002103 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002104 break;
2105 pos = &p->list;
2106 }
2107
2108 list_add(&ie->list, pos);
2109}
2110
Marcel Holtmannaf589252014-07-01 14:11:20 +02002111u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2112 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113{
Johan Hedberg30883512012-01-04 14:16:21 +02002114 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002115 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002116 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002118 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119
Szymon Janc2b2fec42012-11-20 11:38:54 +01002120 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2121
Marcel Holtmannaf589252014-07-01 14:11:20 +02002122 if (!data->ssp_mode)
2123 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002124
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002125 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002126 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002127 if (!ie->data.ssp_mode)
2128 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002129
Johan Hedberga3d4e202012-01-09 00:53:02 +02002130 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002131 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002132 ie->data.rssi = data->rssi;
2133 hci_inquiry_cache_update_resolve(hdev, ie);
2134 }
2135
Johan Hedberg561aafb2012-01-04 13:31:59 +02002136 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002137 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002138
Johan Hedberg561aafb2012-01-04 13:31:59 +02002139 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002140 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002141 if (!ie) {
2142 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2143 goto done;
2144 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002145
2146 list_add(&ie->all, &cache->all);
2147
2148 if (name_known) {
2149 ie->name_state = NAME_KNOWN;
2150 } else {
2151 ie->name_state = NAME_NOT_KNOWN;
2152 list_add(&ie->list, &cache->unknown);
2153 }
2154
2155update:
2156 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002157 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002158 ie->name_state = NAME_KNOWN;
2159 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 }
2161
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002162 memcpy(&ie->data, data, sizeof(*data));
2163 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002165
2166 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002167 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002168
Marcel Holtmannaf589252014-07-01 14:11:20 +02002169done:
2170 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171}
2172
2173static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2174{
Johan Hedberg30883512012-01-04 14:16:21 +02002175 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 struct inquiry_info *info = (struct inquiry_info *) buf;
2177 struct inquiry_entry *e;
2178 int copied = 0;
2179
Johan Hedberg561aafb2012-01-04 13:31:59 +02002180 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002182
2183 if (copied >= num)
2184 break;
2185
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 bacpy(&info->bdaddr, &data->bdaddr);
2187 info->pscan_rep_mode = data->pscan_rep_mode;
2188 info->pscan_period_mode = data->pscan_period_mode;
2189 info->pscan_mode = data->pscan_mode;
2190 memcpy(info->dev_class, data->dev_class, 3);
2191 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002192
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002194 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 }
2196
2197 BT_DBG("cache %p, copied %d", cache, copied);
2198 return copied;
2199}
2200
Johan Hedberg42c6b122013-03-05 20:37:49 +02002201static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202{
2203 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002204 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 struct hci_cp_inquiry cp;
2206
2207 BT_DBG("%s", hdev->name);
2208
2209 if (test_bit(HCI_INQUIRY, &hdev->flags))
2210 return;
2211
2212 /* Start Inquiry */
2213 memcpy(&cp.lap, &ir->lap, 3);
2214 cp.length = ir->length;
2215 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002216 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217}
2218
2219int hci_inquiry(void __user *arg)
2220{
2221 __u8 __user *ptr = arg;
2222 struct hci_inquiry_req ir;
2223 struct hci_dev *hdev;
2224 int err = 0, do_inquiry = 0, max_rsp;
2225 long timeo;
2226 __u8 *buf;
2227
2228 if (copy_from_user(&ir, ptr, sizeof(ir)))
2229 return -EFAULT;
2230
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002231 hdev = hci_dev_get(ir.dev_id);
2232 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 return -ENODEV;
2234
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002235 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2236 err = -EBUSY;
2237 goto done;
2238 }
2239
Marcel Holtmann4a964402014-07-02 19:10:33 +02002240 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002241 err = -EOPNOTSUPP;
2242 goto done;
2243 }
2244
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002245 if (hdev->dev_type != HCI_BREDR) {
2246 err = -EOPNOTSUPP;
2247 goto done;
2248 }
2249
Johan Hedberg56f87902013-10-02 13:43:13 +03002250 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2251 err = -EOPNOTSUPP;
2252 goto done;
2253 }
2254
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002255 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002256 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002257 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002258 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 do_inquiry = 1;
2260 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002261 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262
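	/* ir.length is in units of 1.28 seconds; the 2000 ms budgeted per
	 * unit here presumably leaves the controller some headroom to
	 * finish before the request times out.
	 */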
Marcel Holtmann04837f62006-07-03 10:02:33 +02002263 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002264
2265 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002266 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2267 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002268 if (err < 0)
2269 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002270
2271 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2272 * cleared). If it is interrupted by a signal, return -EINTR.
2273 */
NeilBrown74316202014-07-07 15:16:04 +10002274 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002275 TASK_INTERRUPTIBLE))
2276 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002277 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002279 /* For an unlimited number of responses (ir.num_rsp == 0), use a
 2280 * buffer with 255 entries.
 2281 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2283
 2284 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
 2285 * and then copy it to user space.
 2286 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002287 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002288 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 err = -ENOMEM;
2290 goto done;
2291 }
2292
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002293 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002295 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296
2297 BT_DBG("num_rsp %d", ir.num_rsp);
2298
2299 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2300 ptr += sizeof(ir);
2301 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002302 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002304 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 err = -EFAULT;
2306
2307 kfree(buf);
2308
2309done:
2310 hci_dev_put(hdev);
2311 return err;
2312}
2313
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002314static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 int ret = 0;
2317
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 BT_DBG("%s %p", hdev->name, hdev);
2319
2320 hci_req_lock(hdev);
2321
Johan Hovold94324962012-03-15 14:48:41 +01002322 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2323 ret = -ENODEV;
2324 goto done;
2325 }
2326
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002327 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2328 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002329 /* Check for rfkill but allow the HCI setup stage to
2330 * proceed (which in itself doesn't cause any RF activity).
2331 */
2332 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2333 ret = -ERFKILL;
2334 goto done;
2335 }
2336
 2337 /* Check for a valid public address or a configured static
 2338 * random address, but let the HCI setup proceed to
2339 * be able to determine if there is a public address
2340 * or not.
2341 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002342 * In case of user channel usage, it is not important
2343 * if a public address or static random address is
2344 * available.
2345 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002346 * This check is only valid for BR/EDR controllers
2347 * since AMP controllers do not have an address.
2348 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002349 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2350 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002351 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2352 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2353 ret = -EADDRNOTAVAIL;
2354 goto done;
2355 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002356 }
2357
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 if (test_bit(HCI_UP, &hdev->flags)) {
2359 ret = -EALREADY;
2360 goto done;
2361 }
2362
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 if (hdev->open(hdev)) {
2364 ret = -EIO;
2365 goto done;
2366 }
2367
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002368 atomic_set(&hdev->cmd_cnt, 1);
2369 set_bit(HCI_INIT, &hdev->flags);
2370
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002371 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2372 if (hdev->setup)
2373 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002374
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002375 /* The transport driver can set these quirks before
2376 * creating the HCI device or in its setup callback.
2377 *
2378 * In case any of them is set, the controller has to
2379 * start up as unconfigured.
2380 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002381 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2382 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002383 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002384
2385 /* For an unconfigured controller it is required to
2386 * read at least the version information provided by
2387 * the Read Local Version Information command.
2388 *
2389 * If the set_bdaddr driver callback is provided, then
2390 * also the original Bluetooth public device address
2391 * will be read using the Read BD Address command.
2392 */
2393 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2394 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002395 }
2396
Marcel Holtmann9713c172014-07-06 12:11:15 +02002397 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2398 /* If public address change is configured, ensure that
2399 * the address gets programmed. If the driver does not
2400 * support changing the public address, fail the power
2401 * on procedure.
2402 */
2403 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2404 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002405 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2406 else
2407 ret = -EADDRNOTAVAIL;
2408 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002409
2410 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002411 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002412 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002413 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 }
2415
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002416 clear_bit(HCI_INIT, &hdev->flags);
2417
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 if (!ret) {
2419 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002420 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421 set_bit(HCI_UP, &hdev->flags);
2422 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002423 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002424 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002425 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002426 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002427 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002428 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002429 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002430 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002431 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002432 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002434 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002435 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002436 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437
2438 skb_queue_purge(&hdev->cmd_q);
2439 skb_queue_purge(&hdev->rx_q);
2440
2441 if (hdev->flush)
2442 hdev->flush(hdev);
2443
2444 if (hdev->sent_cmd) {
2445 kfree_skb(hdev->sent_cmd);
2446 hdev->sent_cmd = NULL;
2447 }
2448
2449 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002450 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 }
2452
2453done:
2454 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 return ret;
2456}
2457
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002458/* ---- HCI ioctl helpers ---- */
2459
2460int hci_dev_open(__u16 dev)
2461{
2462 struct hci_dev *hdev;
2463 int err;
2464
2465 hdev = hci_dev_get(dev);
2466 if (!hdev)
2467 return -ENODEV;
2468
Marcel Holtmann4a964402014-07-02 19:10:33 +02002469 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002470 * up as user channel. Trying to bring them up as normal devices
 2471 * will result in a failure. Only user channel operation is
2472 * possible.
2473 *
2474 * When this function is called for a user channel, the flag
2475 * HCI_USER_CHANNEL will be set first before attempting to
2476 * open the device.
2477 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002478 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002479 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2480 err = -EOPNOTSUPP;
2481 goto done;
2482 }
2483
Johan Hedberge1d08f42013-10-01 22:44:50 +03002484 /* We need to ensure that no other power on/off work is pending
2485 * before proceeding to call hci_dev_do_open. This is
2486 * particularly important if the setup procedure has not yet
2487 * completed.
2488 */
2489 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2490 cancel_delayed_work(&hdev->power_off);
2491
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002492 /* After this call it is guaranteed that the setup procedure
2493 * has finished. This means that error conditions like RFKILL
2494 * or no valid public or static random address apply.
2495 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002496 flush_workqueue(hdev->req_workqueue);
2497
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002498 /* For controllers that do not use the management interface and
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002499 * are brought up via the legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002500 * so that pairing works for them. Once the management interface
2501 * is in use this bit will be cleared again and userspace has
2502 * to explicitly enable it.
2503 */
2504 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2505 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002506 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002507
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002508 err = hci_dev_do_open(hdev);
2509
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002510done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002511 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002512 return err;
2513}
2514
Johan Hedbergd7347f32014-07-04 12:37:23 +03002515/* This function requires the caller holds hdev->lock */
2516static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2517{
2518 struct hci_conn_params *p;
2519
Johan Hedbergf161dd42014-08-15 21:06:54 +03002520 list_for_each_entry(p, &hdev->le_conn_params, list) {
2521 if (p->conn) {
2522 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002523 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002524 p->conn = NULL;
2525 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002526 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002527 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002528
2529 BT_DBG("All LE pending actions cleared");
2530}
2531
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532static int hci_dev_do_close(struct hci_dev *hdev)
2533{
2534 BT_DBG("%s %p", hdev->name, hdev);
2535
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002536 cancel_delayed_work(&hdev->power_off);
2537
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 hci_req_cancel(hdev, ENODEV);
2539 hci_req_lock(hdev);
2540
2541 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002542 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 hci_req_unlock(hdev);
2544 return 0;
2545 }
2546
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002547 /* Flush RX and TX works */
2548 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002549 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002551 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002552 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002553 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002554 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002555 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002556 }
2557
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002558 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002559 cancel_delayed_work(&hdev->service_cache);
2560
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002561 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002562
2563 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2564 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002565
Johan Hedberg76727c02014-11-18 09:00:14 +02002566 /* Avoid potential lockdep warnings from the *_flush() calls by
2567 * ensuring the workqueue is empty up front.
2568 */
2569 drain_workqueue(hdev->workqueue);
2570
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002571 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002572 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002573 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002574 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002575 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576
2577 hci_notify(hdev, HCI_DEV_DOWN);
2578
2579 if (hdev->flush)
2580 hdev->flush(hdev);
2581
2582 /* Reset device */
2583 skb_queue_purge(&hdev->cmd_q);
2584 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002585 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2586 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002587 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002589 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 clear_bit(HCI_INIT, &hdev->flags);
2591 }
2592
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002593 /* flush cmd work */
2594 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595
2596 /* Drop queues */
2597 skb_queue_purge(&hdev->rx_q);
2598 skb_queue_purge(&hdev->cmd_q);
2599 skb_queue_purge(&hdev->raw_q);
2600
2601 /* Drop last sent command */
2602 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002603 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 kfree_skb(hdev->sent_cmd);
2605 hdev->sent_cmd = NULL;
2606 }
2607
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002608 kfree_skb(hdev->recv_evt);
2609 hdev->recv_evt = NULL;
2610
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 /* After this point our queues are empty
2612 * and no tasks are scheduled. */
2613 hdev->close(hdev);
2614
Johan Hedberg35b973c2013-03-15 17:06:59 -05002615 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002616 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002617 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2618
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002619 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2620 if (hdev->dev_type == HCI_BREDR) {
2621 hci_dev_lock(hdev);
2622 mgmt_powered(hdev, 0);
2623 hci_dev_unlock(hdev);
2624 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002625 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002626
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002627 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002628 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002629
Johan Hedberge59fda82012-02-22 18:11:53 +02002630 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002631 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002632 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002633
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 hci_req_unlock(hdev);
2635
2636 hci_dev_put(hdev);
2637 return 0;
2638}
2639
2640int hci_dev_close(__u16 dev)
2641{
2642 struct hci_dev *hdev;
2643 int err;
2644
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002645 hdev = hci_dev_get(dev);
2646 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002648
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002649 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2650 err = -EBUSY;
2651 goto done;
2652 }
2653
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002654 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2655 cancel_delayed_work(&hdev->power_off);
2656
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002658
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002659done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 hci_dev_put(hdev);
2661 return err;
2662}
2663
2664int hci_dev_reset(__u16 dev)
2665{
2666 struct hci_dev *hdev;
2667 int ret = 0;
2668
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002669 hdev = hci_dev_get(dev);
2670 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 return -ENODEV;
2672
2673 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674
Marcel Holtmann808a0492013-08-26 20:57:58 -07002675 if (!test_bit(HCI_UP, &hdev->flags)) {
2676 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002678 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002680 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2681 ret = -EBUSY;
2682 goto done;
2683 }
2684
Marcel Holtmann4a964402014-07-02 19:10:33 +02002685 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002686 ret = -EOPNOTSUPP;
2687 goto done;
2688 }
2689
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 /* Drop queues */
2691 skb_queue_purge(&hdev->rx_q);
2692 skb_queue_purge(&hdev->cmd_q);
2693
Johan Hedberg76727c02014-11-18 09:00:14 +02002694 /* Avoid potential lockdep warnings from the *_flush() calls by
2695 * ensuring the workqueue is empty up front.
2696 */
2697 drain_workqueue(hdev->workqueue);
2698
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002699 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002700 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002702 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703
2704 if (hdev->flush)
2705 hdev->flush(hdev);
2706
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002707 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002708 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002710 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711
2712done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 hci_req_unlock(hdev);
2714 hci_dev_put(hdev);
2715 return ret;
2716}
2717
2718int hci_dev_reset_stat(__u16 dev)
2719{
2720 struct hci_dev *hdev;
2721 int ret = 0;
2722
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002723 hdev = hci_dev_get(dev);
2724 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 return -ENODEV;
2726
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002727 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2728 ret = -EBUSY;
2729 goto done;
2730 }
2731
Marcel Holtmann4a964402014-07-02 19:10:33 +02002732 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002733 ret = -EOPNOTSUPP;
2734 goto done;
2735 }
2736
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2738
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002739done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 return ret;
2742}
2743
Johan Hedberg123abc02014-07-10 12:09:07 +03002744static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2745{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002746 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002747
2748 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2749
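	/* scan carries the Write Scan Enable bits as defined in hci.h:
	 * SCAN_INQUIRY (0x01) enables inquiry scan and SCAN_PAGE (0x02)
	 * enables page scan, hence the two checks below.
	 */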
2750 if ((scan & SCAN_PAGE))
2751 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2752 &hdev->dev_flags);
2753 else
2754 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2755 &hdev->dev_flags);
2756
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002757 if ((scan & SCAN_INQUIRY)) {
2758 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2759 &hdev->dev_flags);
2760 } else {
2761 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2762 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2763 &hdev->dev_flags);
2764 }
2765
Johan Hedberg123abc02014-07-10 12:09:07 +03002766 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2767 return;
2768
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002769 if (conn_changed || discov_changed) {
2770 /* In case this was disabled through mgmt */
2771 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2772
2773 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2774 mgmt_update_adv_data(hdev);
2775
Johan Hedberg123abc02014-07-10 12:09:07 +03002776 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002777 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002778}
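
/* A hedged illustration (not from the upstream file) of how the scan
 * bitmask maps onto dev_flags in hci_update_scan_state() above, using
 * the standard SCAN_* defines:
 *
 *	hci_update_scan_state(hdev, SCAN_DISABLED);	// 0x00: clears HCI_CONNECTABLE and HCI_DISCOVERABLE
 *	hci_update_scan_state(hdev, SCAN_PAGE);		// 0x02: connectable only
 *	hci_update_scan_state(hdev, SCAN_PAGE | SCAN_INQUIRY);
 *							// 0x03: connectable and discoverable
 *
 * Dropping SCAN_INQUIRY also clears HCI_LIMITED_DISCOVERABLE, and mgmt
 * is only notified of new settings when one of the flags actually
 * changed.
 */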
2779
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780int hci_dev_cmd(unsigned int cmd, void __user *arg)
2781{
2782 struct hci_dev *hdev;
2783 struct hci_dev_req dr;
2784 int err = 0;
2785
2786 if (copy_from_user(&dr, arg, sizeof(dr)))
2787 return -EFAULT;
2788
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002789 hdev = hci_dev_get(dr.dev_id);
2790 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 return -ENODEV;
2792
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002793 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2794 err = -EBUSY;
2795 goto done;
2796 }
2797
Marcel Holtmann4a964402014-07-02 19:10:33 +02002798 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002799 err = -EOPNOTSUPP;
2800 goto done;
2801 }
2802
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002803 if (hdev->dev_type != HCI_BREDR) {
2804 err = -EOPNOTSUPP;
2805 goto done;
2806 }
2807
Johan Hedberg56f87902013-10-02 13:43:13 +03002808 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2809 err = -EOPNOTSUPP;
2810 goto done;
2811 }
2812
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 switch (cmd) {
2814 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002815 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2816 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 break;
2818
2819 case HCISETENCRYPT:
2820 if (!lmp_encrypt_capable(hdev)) {
2821 err = -EOPNOTSUPP;
2822 break;
2823 }
2824
2825 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2826 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002827 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2828 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 if (err)
2830 break;
2831 }
2832
Johan Hedberg01178cd2013-03-05 20:37:41 +02002833 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2834 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 break;
2836
2837 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002838 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2839 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002840
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002841 /* Ensure that the connectable and discoverable states
2842 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002843 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002844 if (!err)
2845 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 break;
2847
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002848 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002849 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2850 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002851 break;
2852
2853 case HCISETLINKMODE:
2854 hdev->link_mode = ((__u16) dr.dev_opt) &
2855 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2856 break;
2857
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 case HCISETPTYPE:
2859 hdev->pkt_type = (__u16) dr.dev_opt;
2860 break;
2861
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002863 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2864 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865 break;
2866
2867 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002868 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2869 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870 break;
2871
2872 default:
2873 err = -EINVAL;
2874 break;
2875 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002876
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002877done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878 hci_dev_put(hdev);
2879 return err;
2880}
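
/* A rough userspace sketch of the dev_opt packing consumed by the
 * HCISETACLMTU/HCISETSCOMTU cases above; hci_sock_fd is a placeholder
 * for an open AF_BLUETOOTH raw HCI socket. On a little-endian machine
 * the pointer arithmetic above reads the MTU from the upper 16 bits of
 * dev_opt and the packet count from the lower 16 bits:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = (1021 << 16) | 8;		// ACL MTU 1021, 8 ACL packets
 *	ioctl(hci_sock_fd, HCISETACLMTU, &dr);
 */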
2881
2882int hci_get_dev_list(void __user *arg)
2883{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002884 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 struct hci_dev_list_req *dl;
2886 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 int n = 0, size, err;
2888 __u16 dev_num;
2889
2890 if (get_user(dev_num, (__u16 __user *) arg))
2891 return -EFAULT;
2892
2893 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2894 return -EINVAL;
2895
2896 size = sizeof(*dl) + dev_num * sizeof(*dr);
2897
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002898 dl = kzalloc(size, GFP_KERNEL);
2899 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 return -ENOMEM;
2901
2902 dr = dl->dev_req;
2903
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002904 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002905 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002906 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002907
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002908		/* When the auto-off is configured, it means the transport
2909 * is running, but in that case still indicate that the
2910 * device is actually down.
2911 */
2912 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2913 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002914
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002916 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002917
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 if (++n >= dev_num)
2919 break;
2920 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002921 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922
2923 dl->dev_num = n;
2924 size = sizeof(*dl) + n * sizeof(*dr);
2925
2926 err = copy_to_user(arg, dl, size);
2927 kfree(dl);
2928
2929 return err ? -EFAULT : 0;
2930}
2931
2932int hci_get_dev_info(void __user *arg)
2933{
2934 struct hci_dev *hdev;
2935 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002936 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937 int err = 0;
2938
2939 if (copy_from_user(&di, arg, sizeof(di)))
2940 return -EFAULT;
2941
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002942 hdev = hci_dev_get(di.dev_id);
2943 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 return -ENODEV;
2945
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002946	/* When the auto-off is configured, it means the transport
2947 * is running, but in that case still indicate that the
2948 * device is actually down.
2949 */
2950 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2951 flags = hdev->flags & ~BIT(HCI_UP);
2952 else
2953 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002954
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 strcpy(di.name, hdev->name);
2956 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002957 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002958 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002960 if (lmp_bredr_capable(hdev)) {
2961 di.acl_mtu = hdev->acl_mtu;
2962 di.acl_pkts = hdev->acl_pkts;
2963 di.sco_mtu = hdev->sco_mtu;
2964 di.sco_pkts = hdev->sco_pkts;
2965 } else {
2966 di.acl_mtu = hdev->le_mtu;
2967 di.acl_pkts = hdev->le_pkts;
2968 di.sco_mtu = 0;
2969 di.sco_pkts = 0;
2970 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 di.link_policy = hdev->link_policy;
2972 di.link_mode = hdev->link_mode;
2973
2974 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2975 memcpy(&di.features, &hdev->features, sizeof(di.features));
2976
2977 if (copy_to_user(arg, &di, sizeof(di)))
2978 err = -EFAULT;
2979
2980 hci_dev_put(hdev);
2981
2982 return err;
2983}
2984
2985/* ---- Interface to HCI drivers ---- */
2986
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002987static int hci_rfkill_set_block(void *data, bool blocked)
2988{
2989 struct hci_dev *hdev = data;
2990
2991 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2992
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002993 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2994 return -EBUSY;
2995
Johan Hedberg5e130362013-09-13 08:58:17 +03002996 if (blocked) {
2997 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002998 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2999 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003000 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003001 } else {
3002 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003003 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003004
3005 return 0;
3006}
3007
3008static const struct rfkill_ops hci_rfkill_ops = {
3009 .set_block = hci_rfkill_set_block,
3010};
3011
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003012static void hci_power_on(struct work_struct *work)
3013{
3014 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003015 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003016
3017 BT_DBG("%s", hdev->name);
3018
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03003019 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003020 if (err < 0) {
3021 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003022 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03003023 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003024
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003025 /* During the HCI setup phase, a few error conditions are
3026 * ignored and they need to be checked now. If they are still
3027 * valid, it is important to turn the device back off.
3028 */
3029 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02003030 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003031 (hdev->dev_type == HCI_BREDR &&
3032 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3033 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03003034 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3035 hci_dev_do_close(hdev);
3036 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02003037 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3038 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03003039 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003040
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003041 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02003042 /* For unconfigured devices, set the HCI_RAW flag
3043 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02003044 */
3045 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3046 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02003047
3048 /* For fully configured devices, this will send
3049 * the Index Added event. For unconfigured devices,
3050	 * it will send an Unconfigured Index Added event.
3051 *
3052 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3053	 * and no event will be sent.
3054 */
Johan Hedberg744cf192011-11-08 20:40:14 +02003055 mgmt_index_added(hdev);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003056 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02003057		/* Once the controller is configured, it
3058 * is important to clear the HCI_RAW flag.
3059 */
3060 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3061 clear_bit(HCI_RAW, &hdev->flags);
3062
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003063 /* Powering on the controller with HCI_CONFIG set only
3064 * happens with the transition from unconfigured to
3065 * configured. This will send the Index Added event.
3066 */
3067 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003068 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003069}
3070
3071static void hci_power_off(struct work_struct *work)
3072{
Johan Hedberg32435532011-11-07 22:16:04 +02003073 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003074 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003075
3076 BT_DBG("%s", hdev->name);
3077
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003078 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003079}
3080
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003081static void hci_discov_off(struct work_struct *work)
3082{
3083 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003084
3085 hdev = container_of(work, struct hci_dev, discov_off.work);
3086
3087 BT_DBG("%s", hdev->name);
3088
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003089 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003090}
3091
Johan Hedberg35f74982014-02-18 17:14:32 +02003092void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003093{
Johan Hedberg48210022013-01-27 00:31:28 +02003094 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003095
Johan Hedberg48210022013-01-27 00:31:28 +02003096 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3097 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003098 kfree(uuid);
3099 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003100}
3101
Johan Hedberg35f74982014-02-18 17:14:32 +02003102void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003103{
3104 struct list_head *p, *n;
3105
3106 list_for_each_safe(p, n, &hdev->link_keys) {
3107 struct link_key *key;
3108
3109 key = list_entry(p, struct link_key, list);
3110
3111 list_del(p);
3112 kfree(key);
3113 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003114}
3115
Johan Hedberg35f74982014-02-18 17:14:32 +02003116void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003117{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003118 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003119
Johan Hedberg970d0f12014-11-13 14:37:47 +02003120 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3121 list_del_rcu(&k->list);
3122 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003123 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003124}
3125
Johan Hedberg970c4e42014-02-18 10:19:33 +02003126void hci_smp_irks_clear(struct hci_dev *hdev)
3127{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003128 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003129
Johan Hedbergadae20c2014-11-13 14:37:48 +02003130 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3131 list_del_rcu(&k->list);
3132 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003133 }
3134}
3135
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003136struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3137{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003138 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003139
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003140 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003141 if (bacmp(bdaddr, &k->bdaddr) == 0)
3142 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003143
3144 return NULL;
3145}
3146
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303147static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003148 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003149{
3150 /* Legacy key */
3151 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303152 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003153
3154 /* Debug keys are insecure so don't store them persistently */
3155 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303156 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003157
3158 /* Changed combination key and there's no previous one */
3159 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303160 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003161
3162 /* Security mode 3 case */
3163 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303164 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003165
3166	/* Neither local nor remote side had no-bonding as a requirement */
3167 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303168 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003169
3170	/* Local side had dedicated bonding as a requirement */
3171 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303172 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003173
3174	/* Remote side had dedicated bonding as a requirement */
3175 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303176 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003177
3178 /* If none of the above criteria match, then don't store the key
3179 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303180 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003181}
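
/* A worked example of the chain in hci_persistent_key() above: an SSP
 * pairing that yields an unauthenticated combination key (type 0x04) on
 * a connection where the local side asked for dedicated bonding
 * (conn->auth_type == 0x02) falls through the legacy, debug and
 * changed-combination checks and hits the "local side had dedicated
 * bonding" rule, so the key is stored persistently. The same key type
 * on a pure no-bonding connection (auth_type and remote_auth both 0x00
 * or 0x01) matches none of the rules and reaches the final return, so
 * it lives only as long as the connection.
 */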
3182
Johan Hedberge804d252014-07-16 11:42:28 +03003183static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003184{
Johan Hedberge804d252014-07-16 11:42:28 +03003185 if (type == SMP_LTK)
3186 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003187
Johan Hedberge804d252014-07-16 11:42:28 +03003188 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003189}
3190
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003191struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003192 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003193{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003194 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003195
Johan Hedberg970d0f12014-11-13 14:37:47 +02003196 rcu_read_lock();
3197 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003198 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003199 continue;
3200
Johan Hedberge804d252014-07-16 11:42:28 +03003201 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003202 continue;
3203
Johan Hedberg970d0f12014-11-13 14:37:47 +02003204 rcu_read_unlock();
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003205 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003206 }
Johan Hedberg970d0f12014-11-13 14:37:47 +02003207 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003208
3209 return NULL;
3210}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003211
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003212struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003213 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003214{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003215 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003216
Johan Hedberg970d0f12014-11-13 14:37:47 +02003217 rcu_read_lock();
3218 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003219 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003220 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberg970d0f12014-11-13 14:37:47 +02003221 ltk_role(k->type) == role) {
3222 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003223 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003224 }
3225 }
3226 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003227
3228 return NULL;
3229}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003230
Johan Hedberg970c4e42014-02-18 10:19:33 +02003231struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3232{
3233 struct smp_irk *irk;
3234
Johan Hedbergadae20c2014-11-13 14:37:48 +02003235 rcu_read_lock();
3236 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3237 if (!bacmp(&irk->rpa, rpa)) {
3238 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003239 return irk;
3240 }
3241 }
3242
Johan Hedbergadae20c2014-11-13 14:37:48 +02003243 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3244 if (smp_irk_matches(hdev, irk->val, rpa)) {
3245 bacpy(&irk->rpa, rpa);
3246 rcu_read_unlock();
3247 return irk;
3248 }
3249 }
3250 rcu_read_unlock();
3251
Johan Hedberg970c4e42014-02-18 10:19:33 +02003252 return NULL;
3253}
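
/* hci_find_irk_by_rpa() above resolves in two passes: first a cheap
 * bacmp() against the RPA cached in each IRK, and only on a miss the
 * AES-based smp_irk_matches() for every stored key, caching the RPA on
 * a match so the next lookup for the same address takes the fast path.
 */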
3254
3255struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3256 u8 addr_type)
3257{
3258 struct smp_irk *irk;
3259
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003260 /* Identity Address must be public or static random */
3261 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3262 return NULL;
3263
Johan Hedbergadae20c2014-11-13 14:37:48 +02003264 rcu_read_lock();
3265 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003266 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003267 bacmp(bdaddr, &irk->bdaddr) == 0) {
3268 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003269 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003270 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003271 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003272 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003273
3274 return NULL;
3275}
3276
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003277struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003278 bdaddr_t *bdaddr, u8 *val, u8 type,
3279 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003280{
3281 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303282 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003283
3284 old_key = hci_find_link_key(hdev, bdaddr);
3285 if (old_key) {
3286 old_key_type = old_key->type;
3287 key = old_key;
3288 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003289 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003290 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003291 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003292 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003293 list_add(&key->list, &hdev->link_keys);
3294 }
3295
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003296 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003297
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003298 /* Some buggy controller combinations generate a changed
3299 * combination key for legacy pairing even when there's no
3300 * previous key */
3301 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003302 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003303 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003304 if (conn)
3305 conn->key_type = type;
3306 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003307
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003308 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003309 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003310 key->pin_len = pin_len;
3311
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003312 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003313 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003314 else
3315 key->type = type;
3316
Johan Hedberg7652ff62014-06-24 13:15:49 +03003317 if (persistent)
3318 *persistent = hci_persistent_key(hdev, conn, type,
3319 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003320
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003321 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003322}
3323
Johan Hedbergca9142b2014-02-19 14:57:44 +02003324struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003325 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003326 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003327{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003328 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003329 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003330
Johan Hedberge804d252014-07-16 11:42:28 +03003331 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003332 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003333 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003334 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003335 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003336 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003337 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003338 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003339 }
3340
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003341 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003342 key->bdaddr_type = addr_type;
3343 memcpy(key->val, tk, sizeof(key->val));
3344 key->authenticated = authenticated;
3345 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003346 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003347 key->enc_size = enc_size;
3348 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003349
Johan Hedbergca9142b2014-02-19 14:57:44 +02003350 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003351}
3352
Johan Hedbergca9142b2014-02-19 14:57:44 +02003353struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3354 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003355{
3356 struct smp_irk *irk;
3357
3358 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3359 if (!irk) {
3360 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3361 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003362 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003363
3364 bacpy(&irk->bdaddr, bdaddr);
3365 irk->addr_type = addr_type;
3366
Johan Hedbergadae20c2014-11-13 14:37:48 +02003367 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003368 }
3369
3370 memcpy(irk->val, val, 16);
3371 bacpy(&irk->rpa, rpa);
3372
Johan Hedbergca9142b2014-02-19 14:57:44 +02003373 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003374}
3375
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003376int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3377{
3378 struct link_key *key;
3379
3380 key = hci_find_link_key(hdev, bdaddr);
3381 if (!key)
3382 return -ENOENT;
3383
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003384 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003385
3386 list_del(&key->list);
3387 kfree(key);
3388
3389 return 0;
3390}
3391
Johan Hedberge0b2b272014-02-18 17:14:31 +02003392int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003393{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003394 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003395 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003396
Johan Hedberg970d0f12014-11-13 14:37:47 +02003397 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003398 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003399 continue;
3400
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003401 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003402
Johan Hedberg970d0f12014-11-13 14:37:47 +02003403 list_del_rcu(&k->list);
3404 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003405 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003406 }
3407
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003408 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003409}
3410
Johan Hedberga7ec7332014-02-18 17:14:35 +02003411void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3412{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003413 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02003414
Johan Hedbergadae20c2014-11-13 14:37:48 +02003415 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003416 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3417 continue;
3418
3419 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3420
Johan Hedbergadae20c2014-11-13 14:37:48 +02003421 list_del_rcu(&k->list);
3422 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02003423 }
3424}
3425
Ville Tervo6bd32322011-02-16 16:32:41 +02003426/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003427static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003428{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003429 struct hci_dev *hdev = container_of(work, struct hci_dev,
3430 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003431
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003432 if (hdev->sent_cmd) {
3433 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3434 u16 opcode = __le16_to_cpu(sent->opcode);
3435
3436 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3437 } else {
3438 BT_ERR("%s command tx timeout", hdev->name);
3439 }
3440
Ville Tervo6bd32322011-02-16 16:32:41 +02003441 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003442 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003443}
3444
Szymon Janc2763eda2011-03-22 13:12:22 +01003445struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003446 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003447{
3448 struct oob_data *data;
3449
3450 list_for_each_entry(data, &hdev->remote_oob_data, list)
3451 if (bacmp(bdaddr, &data->bdaddr) == 0)
3452 return data;
3453
3454 return NULL;
3455}
3456
3457int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3458{
3459 struct oob_data *data;
3460
3461 data = hci_find_remote_oob_data(hdev, bdaddr);
3462 if (!data)
3463 return -ENOENT;
3464
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003465 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003466
3467 list_del(&data->list);
3468 kfree(data);
3469
3470 return 0;
3471}
3472
Johan Hedberg35f74982014-02-18 17:14:32 +02003473void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003474{
3475 struct oob_data *data, *n;
3476
3477 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3478 list_del(&data->list);
3479 kfree(data);
3480 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003481}
3482
Marcel Holtmann07988722014-01-10 02:07:29 -08003483int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg38da1702014-11-17 20:52:20 +02003484 u8 *hash, u8 *rand)
Szymon Janc2763eda2011-03-22 13:12:22 +01003485{
3486 struct oob_data *data;
3487
3488 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003489 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003490 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003491 if (!data)
3492 return -ENOMEM;
3493
3494 bacpy(&data->bdaddr, bdaddr);
3495 list_add(&data->list, &hdev->remote_oob_data);
3496 }
3497
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003498 memcpy(data->hash192, hash, sizeof(data->hash192));
Johan Hedberg38da1702014-11-17 20:52:20 +02003499 memcpy(data->rand192, rand, sizeof(data->rand192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003500
Marcel Holtmann07988722014-01-10 02:07:29 -08003501 memset(data->hash256, 0, sizeof(data->hash256));
Johan Hedberg38da1702014-11-17 20:52:20 +02003502 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmann07988722014-01-10 02:07:29 -08003503
3504 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3505
3506 return 0;
3507}
3508
3509int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg38da1702014-11-17 20:52:20 +02003510 u8 *hash192, u8 *rand192,
3511 u8 *hash256, u8 *rand256)
Marcel Holtmann07988722014-01-10 02:07:29 -08003512{
3513 struct oob_data *data;
3514
3515 data = hci_find_remote_oob_data(hdev, bdaddr);
3516 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003517 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003518 if (!data)
3519 return -ENOMEM;
3520
3521 bacpy(&data->bdaddr, bdaddr);
3522 list_add(&data->list, &hdev->remote_oob_data);
3523 }
3524
3525 memcpy(data->hash192, hash192, sizeof(data->hash192));
Johan Hedberg38da1702014-11-17 20:52:20 +02003526 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003527
3528 memcpy(data->hash256, hash256, sizeof(data->hash256));
Johan Hedberg38da1702014-11-17 20:52:20 +02003529 memcpy(data->rand256, rand256, sizeof(data->rand256));
Marcel Holtmann07988722014-01-10 02:07:29 -08003530
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003531 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003532
3533 return 0;
3534}
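
/* The two helpers above differ only in what they keep: the plain
 * variant stores the P-192 hash/randomizer and zeroes the P-256 pair,
 * while the _ext_ variant fills both, which is what Secure Connections
 * capable pairings need.
 */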
3535
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003536struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003537 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003538{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003539 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003540
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003541 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003542 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003543 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003544 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003545
3546 return NULL;
3547}
3548
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003549void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003550{
3551 struct list_head *p, *n;
3552
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003553 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003554 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003555
3556 list_del(p);
3557 kfree(b);
3558 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003559}
3560
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003561int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003562{
3563 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003564
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003565 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003566 return -EBADF;
3567
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003568 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003569 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003570
Johan Hedberg27f70f32014-07-21 10:50:06 +03003571 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003572 if (!entry)
3573 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003574
3575 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003576 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003577
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003578 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003579
3580 return 0;
3581}
3582
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003583int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003584{
3585 struct bdaddr_list *entry;
3586
Johan Hedberg35f74982014-02-18 17:14:32 +02003587 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003588 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003589 return 0;
3590 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003591
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003592 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003593 if (!entry)
3594 return -ENOENT;
3595
3596 list_del(&entry->list);
3597 kfree(entry);
3598
3599 return 0;
3600}
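
/* A hedged usage sketch for the generic bdaddr list helpers above, run
 * against hdev->whitelist with hdev->lock assumed held; the address
 * bytes are purely illustrative (bdaddr_t stores them little-endian):
 *
 *	bdaddr_t peer = {{ 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 }};  // 00:11:22:33:44:55
 *
 *	hci_bdaddr_list_add(&hdev->whitelist, &peer, BDADDR_BREDR);
 *	...
 *	hci_bdaddr_list_del(&hdev->whitelist, &peer, BDADDR_BREDR);
 *
 * Passing BDADDR_ANY to _del clears the whole list, mirroring the
 * explicit check in the function above.
 */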
3601
Andre Guedes15819a72014-02-03 13:56:18 -03003602/* This function requires the caller holds hdev->lock */
3603struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3604 bdaddr_t *addr, u8 addr_type)
3605{
3606 struct hci_conn_params *params;
3607
Johan Hedberg738f6182014-07-03 19:33:51 +03003608 /* The conn params list only contains identity addresses */
3609 if (!hci_is_identity_address(addr, addr_type))
3610 return NULL;
3611
Andre Guedes15819a72014-02-03 13:56:18 -03003612 list_for_each_entry(params, &hdev->le_conn_params, list) {
3613 if (bacmp(&params->addr, addr) == 0 &&
3614 params->addr_type == addr_type) {
3615 return params;
3616 }
3617 }
3618
3619 return NULL;
3620}
3621
Andre Guedescef952c2014-02-26 20:21:49 -03003622static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3623{
3624 struct hci_conn *conn;
3625
3626 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3627 if (!conn)
3628 return false;
3629
3630 if (conn->dst_type != type)
3631 return false;
3632
3633 if (conn->state != BT_CONNECTED)
3634 return false;
3635
3636 return true;
3637}
3638
Andre Guedes15819a72014-02-03 13:56:18 -03003639/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003640struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3641 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003642{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003643 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003644
Johan Hedberg738f6182014-07-03 19:33:51 +03003645 /* The list only contains identity addresses */
3646 if (!hci_is_identity_address(addr, addr_type))
3647 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003648
Johan Hedberg501f8822014-07-04 12:37:26 +03003649 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003650 if (bacmp(&param->addr, addr) == 0 &&
3651 param->addr_type == addr_type)
3652 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003653 }
3654
3655 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003656}
3657
3658/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003659struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3660 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003661{
3662 struct hci_conn_params *params;
3663
Johan Hedbergc46245b2014-07-02 17:37:33 +03003664 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003665 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003666
Andre Guedes15819a72014-02-03 13:56:18 -03003667 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003668 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003669 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003670
3671 params = kzalloc(sizeof(*params), GFP_KERNEL);
3672 if (!params) {
3673 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003674 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003675 }
3676
3677 bacpy(&params->addr, addr);
3678 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003679
3680 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003681 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003682
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003683 params->conn_min_interval = hdev->le_conn_min_interval;
3684 params->conn_max_interval = hdev->le_conn_max_interval;
3685 params->conn_latency = hdev->le_conn_latency;
3686 params->supervision_timeout = hdev->le_supv_timeout;
3687 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3688
3689 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3690
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003691 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003692}
3693
3694/* This function requires the caller holds hdev->lock */
3695int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003696 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003697{
3698 struct hci_conn_params *params;
3699
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003700 params = hci_conn_params_add(hdev, addr, addr_type);
3701 if (!params)
3702 return -EIO;
Andre Guedes15819a72014-02-03 13:56:18 -03003703
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003704 if (params->auto_connect == auto_connect)
3705 return 0;
3706
Johan Hedberg95305ba2014-07-04 12:37:21 +03003707 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003708
Andre Guedescef952c2014-02-26 20:21:49 -03003709 switch (auto_connect) {
3710 case HCI_AUTO_CONN_DISABLED:
3711 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003712 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003713 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003714 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003715 list_add(&params->action, &hdev->pend_le_reports);
3716 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003717 break;
Marcel Holtmann4b9e7e72014-07-23 21:55:23 +02003718 case HCI_AUTO_CONN_DIRECT:
Andre Guedescef952c2014-02-26 20:21:49 -03003719 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003720 if (!is_connected(hdev, addr, addr_type)) {
3721 list_add(&params->action, &hdev->pend_le_conns);
3722 hci_update_background_scan(hdev);
3723 }
Andre Guedescef952c2014-02-26 20:21:49 -03003724 break;
3725 }
Andre Guedes15819a72014-02-03 13:56:18 -03003726
Johan Hedberg851efca2014-07-02 22:42:00 +03003727 params->auto_connect = auto_connect;
3728
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003729 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3730 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003731
3732 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003733}
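
/* A sketch of what the auto_connect values above mean in practice;
 * addr is a placeholder identity address and hdev->lock is assumed
 * held:
 *
 *	// Reconnect whenever the device is seen advertising:
 *	hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *			    HCI_AUTO_CONN_ALWAYS);
 *
 *	// Only report the device to mgmt, never connect automatically:
 *	hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *			    HCI_AUTO_CONN_REPORT);
 *
 * ALWAYS/DIRECT park the entry on pend_le_conns (unless already
 * connected), REPORT on pend_le_reports, and every transition re-runs
 * hci_update_background_scan() so the controller follows the lists.
 */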
3734
Johan Hedbergf6c63242014-08-15 21:06:59 +03003735static void hci_conn_params_free(struct hci_conn_params *params)
3736{
3737 if (params->conn) {
3738 hci_conn_drop(params->conn);
3739 hci_conn_put(params->conn);
3740 }
3741
3742 list_del(&params->action);
3743 list_del(&params->list);
3744 kfree(params);
3745}
3746
Andre Guedes15819a72014-02-03 13:56:18 -03003747/* This function requires the caller holds hdev->lock */
3748void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3749{
3750 struct hci_conn_params *params;
3751
3752 params = hci_conn_params_lookup(hdev, addr, addr_type);
3753 if (!params)
3754 return;
3755
Johan Hedbergf6c63242014-08-15 21:06:59 +03003756 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003757
Johan Hedberg95305ba2014-07-04 12:37:21 +03003758 hci_update_background_scan(hdev);
3759
Andre Guedes15819a72014-02-03 13:56:18 -03003760 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3761}
3762
3763/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003764void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003765{
3766 struct hci_conn_params *params, *tmp;
3767
3768 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03003769 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3770 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003771 list_del(&params->list);
3772 kfree(params);
3773 }
3774
Johan Hedberg55af49a82014-07-02 17:37:26 +03003775 BT_DBG("All LE disabled connection parameters were removed");
3776}
3777
3778/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003779void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003780{
3781 struct hci_conn_params *params, *tmp;
3782
Johan Hedbergf6c63242014-08-15 21:06:59 +03003783 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3784 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003785
Johan Hedberga2f41a82014-07-04 12:37:19 +03003786 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003787
Andre Guedes15819a72014-02-03 13:56:18 -03003788 BT_DBG("All LE connection parameters were removed");
3789}
3790
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003791static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003792{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003793 if (status) {
3794 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003795
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003796 hci_dev_lock(hdev);
3797 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3798 hci_dev_unlock(hdev);
3799 return;
3800 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003801}
3802
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003803static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003804{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003805 /* General inquiry access code (GIAC) */
3806 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3807 struct hci_request req;
3808 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003809 int err;
3810
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003811 if (status) {
3812 BT_ERR("Failed to disable LE scanning: status %d", status);
3813 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003814 }
3815
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003816 switch (hdev->discovery.type) {
3817 case DISCOV_TYPE_LE:
3818 hci_dev_lock(hdev);
3819 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3820 hci_dev_unlock(hdev);
3821 break;
3822
3823 case DISCOV_TYPE_INTERLEAVED:
3824 hci_req_init(&req, hdev);
3825
3826 memset(&cp, 0, sizeof(cp));
3827 memcpy(&cp.lap, lap, sizeof(cp.lap));
3828 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3829 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3830
3831 hci_dev_lock(hdev);
3832
3833 hci_inquiry_cache_flush(hdev);
3834
3835 err = hci_req_run(&req, inquiry_complete);
3836 if (err) {
3837 BT_ERR("Inquiry request failed: err %d", err);
3838 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3839 }
3840
3841 hci_dev_unlock(hdev);
3842 break;
3843 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003844}
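
/* For DISCOV_TYPE_INTERLEAVED the completion above chains straight into
 * the BR/EDR half of discovery: it flushes the inquiry cache and issues
 * HCI_OP_INQUIRY with the general inquiry access code for
 * DISCOV_INTERLEAVED_INQUIRY_LEN, leaving inquiry_complete() to put the
 * state machine back to DISCOVERY_STOPPED if that request fails.
 */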
3845
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003846static void le_scan_disable_work(struct work_struct *work)
3847{
3848 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003849 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003850 struct hci_request req;
3851 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003852
3853 BT_DBG("%s", hdev->name);
3854
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003855 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003856
Andre Guedesb1efcc22014-02-26 20:21:40 -03003857 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003858
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003859 err = hci_req_run(&req, le_scan_disable_work_complete);
3860 if (err)
3861 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003862}
3863
Johan Hedberg8d972502014-02-28 12:54:14 +02003864static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3865{
3866 struct hci_dev *hdev = req->hdev;
3867
3868 /* If we're advertising or initiating an LE connection we can't
3869 * go ahead and change the random address at this time. This is
3870 * because the eventual initiator address used for the
3871 * subsequently created connection will be undefined (some
3872 * controllers use the new address and others the one we had
3873 * when the operation started).
3874 *
3875 * In this kind of scenario skip the update and let the random
3876 * address be updated at the next cycle.
3877 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003878 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003879 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3880 BT_DBG("Deferring random address update");
Johan Hedberg9a783a12014-09-12 09:31:52 -07003881 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Johan Hedberg8d972502014-02-28 12:54:14 +02003882 return;
3883 }
3884
3885 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3886}
3887
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003888int hci_update_random_address(struct hci_request *req, bool require_privacy,
3889 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003890{
3891 struct hci_dev *hdev = req->hdev;
3892 int err;
3893
 3894	/* If privacy is enabled, use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003895	 * the current RPA has expired or there is something other than
3896 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003897 */
3898 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003899 int to;
3900
3901 *own_addr_type = ADDR_LE_DEV_RANDOM;
3902
3903 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003904 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003905 return 0;
3906
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003907 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003908 if (err < 0) {
3909 BT_ERR("%s failed to generate new RPA", hdev->name);
3910 return err;
3911 }
3912
Johan Hedberg8d972502014-02-28 12:54:14 +02003913 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003914
3915 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3916 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3917
3918 return 0;
3919 }
3920
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003921	/* In case of required privacy without a resolvable private address,
3922 * use an unresolvable private address. This is useful for active
3923 * scanning and non-connectable advertising.
3924 */
3925 if (require_privacy) {
3926 bdaddr_t urpa;
3927
3928 get_random_bytes(&urpa, 6);
3929 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3930
3931 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003932 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003933 return 0;
3934 }
3935
Johan Hedbergebd3a742014-02-23 19:42:21 +02003936 /* If forcing static address is in use or there is no public
 3937	 * address, use the static address as the random address (but skip
 3938	 * the HCI command if the current random address is already the
 3939	 * static one).
3940 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003941 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003942 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3943 *own_addr_type = ADDR_LE_DEV_RANDOM;
3944 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3945 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3946 &hdev->static_addr);
3947 return 0;
3948 }
3949
3950 /* Neither privacy nor static address is being used so use a
3951 * public address.
3952 */
3953 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3954
3955 return 0;
3956}
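
/* Summary of the address selection in hci_update_random_address()
 * above, in priority order:
 *
 *	1. HCI_PRIVACY set	-> resolvable private address, regenerated
 *				   once HCI_RPA_EXPIRED fires
 *	2. require_privacy	-> fresh unresolvable private address
 *				   (top two bits cleared)
 *	3. forced static or	-> static random address
 *	   no public bdaddr
 *	4. otherwise		-> public device address
 */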
3957
Johan Hedberga1f4c312014-02-27 14:05:41 +02003958/* Copy the Identity Address of the controller.
3959 *
3960 * If the controller has a public BD_ADDR, then by default use that one.
 3961	 * If this is an LE-only controller without a public address, default to
3962 * the static random address.
3963 *
3964 * For debugging purposes it is possible to force controllers with a
3965 * public address to use the static random address instead.
3966 */
3967void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3968 u8 *bdaddr_type)
3969{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003970 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003971 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3972 bacpy(bdaddr, &hdev->static_addr);
3973 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3974 } else {
3975 bacpy(bdaddr, &hdev->bdaddr);
3976 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3977 }
3978}
3979
David Herrmann9be0dab2012-04-22 14:39:57 +02003980/* Alloc HCI device */
3981struct hci_dev *hci_alloc_dev(void)
3982{
3983 struct hci_dev *hdev;
3984
Johan Hedberg27f70f32014-07-21 10:50:06 +03003985 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003986 if (!hdev)
3987 return NULL;
3988
David Herrmannb1b813d2012-04-22 14:39:58 +02003989 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3990 hdev->esco_type = (ESCO_HV1);
3991 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003992 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3993 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003994 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003995 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3996 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003997
David Herrmannb1b813d2012-04-22 14:39:58 +02003998 hdev->sniff_max_interval = 800;
3999 hdev->sniff_min_interval = 80;
4000
Marcel Holtmann3f959d42014-02-20 11:55:56 -08004001 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02004002 hdev->le_adv_min_interval = 0x0800;
4003 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004004 hdev->le_scan_interval = 0x0060;
4005 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07004006 hdev->le_conn_min_interval = 0x0028;
4007 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02004008 hdev->le_conn_latency = 0x0000;
4009 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004010
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004011 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01004012 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02004013 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4014 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004015
David Herrmannb1b813d2012-04-22 14:39:58 +02004016 mutex_init(&hdev->lock);
4017 mutex_init(&hdev->req_lock);
4018
4019 INIT_LIST_HEAD(&hdev->mgmt_pending);
4020 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004021 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02004022 INIT_LIST_HEAD(&hdev->uuids);
4023 INIT_LIST_HEAD(&hdev->link_keys);
4024 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004025 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02004026 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004027 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03004028 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03004029 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03004030 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03004031 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02004032
4033 INIT_WORK(&hdev->rx_work, hci_rx_work);
4034 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4035 INIT_WORK(&hdev->tx_work, hci_tx_work);
4036 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02004037
David Herrmannb1b813d2012-04-22 14:39:58 +02004038 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4039 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4040 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4041
David Herrmannb1b813d2012-04-22 14:39:58 +02004042 skb_queue_head_init(&hdev->rx_q);
4043 skb_queue_head_init(&hdev->cmd_q);
4044 skb_queue_head_init(&hdev->raw_q);
4045
4046 init_waitqueue_head(&hdev->req_wait_q);
4047
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004048 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02004049
David Herrmannb1b813d2012-04-22 14:39:58 +02004050 hci_init_sysfs(hdev);
4051 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02004052
4053 return hdev;
4054}
4055EXPORT_SYMBOL(hci_alloc_dev);
4056
4057/* Free HCI device */
4058void hci_free_dev(struct hci_dev *hdev)
4059{
David Herrmann9be0dab2012-04-22 14:39:57 +02004060 /* will free via device release */
4061 put_device(&hdev->dev);
4062}
4063EXPORT_SYMBOL(hci_free_dev);
4064
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065/* Register HCI device */
4066int hci_register_dev(struct hci_dev *hdev)
4067{
David Herrmannb1b813d2012-04-22 14:39:58 +02004068 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069
Marcel Holtmann74292d52014-07-06 15:50:27 +02004070 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071 return -EINVAL;
4072
Mat Martineau08add512011-11-02 16:18:36 -07004073 /* Do not allow HCI_AMP devices to register at index 0,
4074 * so the index can be used as the AMP controller ID.
4075 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004076 switch (hdev->dev_type) {
4077 case HCI_BREDR:
4078 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4079 break;
4080 case HCI_AMP:
4081 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4082 break;
4083 default:
4084 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004085 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004086
Sasha Levin3df92b32012-05-27 22:36:56 +02004087 if (id < 0)
4088 return id;
4089
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 sprintf(hdev->name, "hci%d", id);
4091 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004092
4093 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4094
Kees Cookd8537542013-07-03 15:04:57 -07004095 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4096 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004097 if (!hdev->workqueue) {
4098 error = -ENOMEM;
4099 goto err;
4100 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004101
Kees Cookd8537542013-07-03 15:04:57 -07004102 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4103 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004104 if (!hdev->req_workqueue) {
4105 destroy_workqueue(hdev->workqueue);
4106 error = -ENOMEM;
4107 goto err;
4108 }
4109
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004110 if (!IS_ERR_OR_NULL(bt_debugfs))
4111 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4112
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004113 dev_set_name(&hdev->dev, "%s", hdev->name);
4114
4115 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004116 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03004117 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004119 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004120 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4121 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004122 if (hdev->rfkill) {
4123 if (rfkill_register(hdev->rfkill) < 0) {
4124 rfkill_destroy(hdev->rfkill);
4125 hdev->rfkill = NULL;
4126 }
4127 }
4128
Johan Hedberg5e130362013-09-13 08:58:17 +03004129 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4130 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4131
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004132 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004133 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004134
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004135 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004136 /* Assume BR/EDR support until proven otherwise (such as
4137		 * through reading supported features during init).
4138 */
4139 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4140 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004141
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004142 write_lock(&hci_dev_list_lock);
4143 list_add(&hdev->list, &hci_dev_list);
4144 write_unlock(&hci_dev_list_lock);
4145
Marcel Holtmann4a964402014-07-02 19:10:33 +02004146 /* Devices that are marked for raw-only usage are unconfigured
4147 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004148 */
4149 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004150 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004151
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004153 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154
Johan Hedberg19202572013-01-14 22:33:51 +02004155 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004156
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004158
David Herrmann33ca9542011-10-08 14:58:49 +02004159err_wqueue:
4160 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004161 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004162err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004163 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004164
David Herrmann33ca9542011-10-08 14:58:49 +02004165 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166}
4167EXPORT_SYMBOL(hci_register_dev);
4168
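/* Usage sketch (illustrative, not part of the original file): the minimal
 * probe path of a hypothetical transport driver. The my_* names are
 * assumptions; real drivers follow this same alloc -> set callbacks ->
 * register pattern, since hci_register_dev() rejects a device without
 * open/close/send.
 */
static int my_dev_open(struct hci_dev *hdev)
{
	return 0;
}

static int my_dev_close(struct hci_dev *hdev)
{
	return 0;
}

static int my_dev_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* a real driver would hand the skb to its hardware here */
	kfree_skb(skb);
	return 0;
}

static int my_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_VIRTUAL;	/* transport type, assumed here */
	hdev->open  = my_dev_open;
	hdev->close = my_dev_close;
	hdev->send  = my_dev_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		/* undo hci_alloc_dev(); the device was never registered */
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
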
4169/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004170void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171{
Sasha Levin3df92b32012-05-27 22:36:56 +02004172 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004173
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004174 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004175
Johan Hovold94324962012-03-15 14:48:41 +01004176 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4177
Sasha Levin3df92b32012-05-27 22:36:56 +02004178 id = hdev->id;
4179
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004180 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004182 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183
4184 hci_dev_do_close(hdev);
4185
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304186 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004187 kfree_skb(hdev->reassembly[i]);
4188
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004189 cancel_work_sync(&hdev->power_on);
4190
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004191 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02004192 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4193 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004194 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004195 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004196 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004197 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004198
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004199 /* mgmt_index_removed should take care of emptying the
4200 * pending list */
4201 BUG_ON(!list_empty(&hdev->mgmt_pending));
4202
Linus Torvalds1da177e2005-04-16 15:20:36 -07004203 hci_notify(hdev, HCI_DEV_UNREG);
4204
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004205 if (hdev->rfkill) {
4206 rfkill_unregister(hdev->rfkill);
4207 rfkill_destroy(hdev->rfkill);
4208 }
4209
Johan Hedberg711eafe2014-08-08 09:32:52 +03004210 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02004211
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004212 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004213
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004214 debugfs_remove_recursive(hdev->debugfs);
4215
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004216 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004217 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004218
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004219 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004220 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004221 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004222 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004223 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004224 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004225 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004226 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004227 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004228 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004229 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004230
David Herrmanndc946bd2012-01-07 15:47:24 +01004231 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004232
4233 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234}
4235EXPORT_SYMBOL(hci_unregister_dev);
4236
4237/* Suspend HCI device */
4238int hci_suspend_dev(struct hci_dev *hdev)
4239{
4240 hci_notify(hdev, HCI_DEV_SUSPEND);
4241 return 0;
4242}
4243EXPORT_SYMBOL(hci_suspend_dev);
4244
4245/* Resume HCI device */
4246int hci_resume_dev(struct hci_dev *hdev)
4247{
4248 hci_notify(hdev, HCI_DEV_RESUME);
4249 return 0;
4250}
4251EXPORT_SYMBOL(hci_resume_dev);
4252
Marcel Holtmann75e05692014-11-02 08:15:38 +01004253/* Reset HCI device */
4254int hci_reset_dev(struct hci_dev *hdev)
4255{
4256 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4257 struct sk_buff *skb;
4258
4259 skb = bt_skb_alloc(3, GFP_ATOMIC);
4260 if (!skb)
4261 return -ENOMEM;
4262
4263 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4264 memcpy(skb_put(skb, 3), hw_err, 3);
4265
4266 /* Send Hardware Error to upper stack */
4267 return hci_recv_frame(hdev, skb);
4268}
4269EXPORT_SYMBOL(hci_reset_dev);
4270
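/* Note (illustrative): the three bytes sent above form a synthetic HCI
 * event - event code HCI_EV_HARDWARE_ERROR, parameter length 0x01 and
 * hardware code 0x00 - injected into the normal RX path so the stack
 * processes it like a controller-reported hardware error. A driver
 * would call this when its transport wedges, e.g.:
 *
 *	if (firmware_crashed)
 *		hci_reset_dev(hdev);
 */
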
Marcel Holtmann76bca882009-11-18 00:40:39 +01004271/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004272int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004273{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004274 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004275 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004276 kfree_skb(skb);
4277 return -ENXIO;
4278 }
4279
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004280 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004281 bt_cb(skb)->incoming = 1;
4282
4283 /* Time stamp */
4284 __net_timestamp(skb);
4285
Marcel Holtmann76bca882009-11-18 00:40:39 +01004286 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004287 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004288
Marcel Holtmann76bca882009-11-18 00:40:39 +01004289 return 0;
4290}
4291EXPORT_SYMBOL(hci_recv_frame);
4292
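/* Usage sketch (assumption, not from the original file): how a driver
 * hands a complete frame to the core. The packet type must be set
 * before calling hci_recv_frame(); the core timestamps the skb and
 * queues it for hci_rx_work(). All my_* names are hypothetical.
 */
static int my_dev_rx_event(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	/* one complete HCI event */

	/* consumes the skb even on error */
	return hci_recv_frame(hdev, skb);
}
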
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304293static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004294 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304295{
4296 int len = 0;
4297 int hlen = 0;
4298 int remain = count;
4299 struct sk_buff *skb;
4300 struct bt_skb_cb *scb;
4301
4302 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004303 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304304 return -EILSEQ;
4305
4306 skb = hdev->reassembly[index];
4307
4308 if (!skb) {
4309 switch (type) {
4310 case HCI_ACLDATA_PKT:
4311 len = HCI_MAX_FRAME_SIZE;
4312 hlen = HCI_ACL_HDR_SIZE;
4313 break;
4314 case HCI_EVENT_PKT:
4315 len = HCI_MAX_EVENT_SIZE;
4316 hlen = HCI_EVENT_HDR_SIZE;
4317 break;
4318 case HCI_SCODATA_PKT:
4319 len = HCI_MAX_SCO_SIZE;
4320 hlen = HCI_SCO_HDR_SIZE;
4321 break;
4322 }
4323
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004324 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304325 if (!skb)
4326 return -ENOMEM;
4327
4328 scb = (void *) skb->cb;
4329 scb->expect = hlen;
4330 scb->pkt_type = type;
4331
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304332 hdev->reassembly[index] = skb;
4333 }
4334
4335 while (count) {
4336 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004337 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304338
4339 memcpy(skb_put(skb, len), data, len);
4340
4341 count -= len;
4342 data += len;
4343 scb->expect -= len;
4344 remain = count;
4345
4346 switch (type) {
4347 case HCI_EVENT_PKT:
4348 if (skb->len == HCI_EVENT_HDR_SIZE) {
4349 struct hci_event_hdr *h = hci_event_hdr(skb);
4350 scb->expect = h->plen;
4351
4352 if (skb_tailroom(skb) < scb->expect) {
4353 kfree_skb(skb);
4354 hdev->reassembly[index] = NULL;
4355 return -ENOMEM;
4356 }
4357 }
4358 break;
4359
4360 case HCI_ACLDATA_PKT:
4361 if (skb->len == HCI_ACL_HDR_SIZE) {
4362 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4363 scb->expect = __le16_to_cpu(h->dlen);
4364
4365 if (skb_tailroom(skb) < scb->expect) {
4366 kfree_skb(skb);
4367 hdev->reassembly[index] = NULL;
4368 return -ENOMEM;
4369 }
4370 }
4371 break;
4372
4373 case HCI_SCODATA_PKT:
4374 if (skb->len == HCI_SCO_HDR_SIZE) {
4375 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4376 scb->expect = h->dlen;
4377
4378 if (skb_tailroom(skb) < scb->expect) {
4379 kfree_skb(skb);
4380 hdev->reassembly[index] = NULL;
4381 return -ENOMEM;
4382 }
4383 }
4384 break;
4385 }
4386
4387 if (scb->expect == 0) {
4388 /* Complete frame */
4389
4390 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004391 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304392
4393 hdev->reassembly[index] = NULL;
4394 return remain;
4395 }
4396 }
4397
4398 return remain;
4399}
4400
Suraj Sumangala99811512010-07-14 13:02:19 +05304401#define STREAM_REASSEMBLY 0
4402
4403int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4404{
4405 int type;
4406 int rem = 0;
4407
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004408 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304409 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4410
4411 if (!skb) {
4412 struct { char type; } *pkt;
4413
4414 /* Start of the frame */
4415 pkt = data;
4416 type = pkt->type;
4417
4418 data++;
4419 count--;
4420 } else
4421 type = bt_cb(skb)->pkt_type;
4422
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004423 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004424 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304425 if (rem < 0)
4426 return rem;
4427
4428 data += (count - rem);
4429 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004430 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304431
4432 return rem;
4433}
4434EXPORT_SYMBOL(hci_recv_stream_fragment);
4435
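/* Usage sketch (hypothetical): a UART-style driver with no hardware
 * framing feeds raw bytes here. hci_recv_stream_fragment() peels off
 * the packet type indicator byte and reassembles complete frames via
 * hci_reassembly() before handing them to hci_recv_frame().
 */
static void my_uart_receive(struct hci_dev *hdev, const u8 *data, int count)
{
	int err = hci_recv_stream_fragment(hdev, (void *)data, count);

	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}
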
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436/* ---- Interface to upper protocols ---- */
4437
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438int hci_register_cb(struct hci_cb *cb)
4439{
4440 BT_DBG("%p name %s", cb, cb->name);
4441
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004442 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004443 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004444 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445
4446 return 0;
4447}
4448EXPORT_SYMBOL(hci_register_cb);
4449
4450int hci_unregister_cb(struct hci_cb *cb)
4451{
4452 BT_DBG("%p name %s", cb, cb->name);
4453
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004454 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004456 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457
4458 return 0;
4459}
4460EXPORT_SYMBOL(hci_unregister_cb);
4461
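/* Usage note (illustrative): upper protocols keep a statically
 * allocated struct hci_cb alive for the lifetime of the module, e.g.
 *
 *	static struct hci_cb my_cb = { .name = "my_proto" };
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */
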
Marcel Holtmann51086992013-10-10 14:54:19 -07004462static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004464 int err;
4465
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004466 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004468 /* Time stamp */
4469 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004471 /* Send copy to monitor */
4472 hci_send_to_monitor(hdev, skb);
4473
4474 if (atomic_read(&hdev->promisc)) {
4475 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004476 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477 }
4478
4479	/* Get rid of skb owner prior to sending to the driver. */
4480 skb_orphan(skb);
4481
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004482 err = hdev->send(hdev, skb);
4483 if (err < 0) {
4484 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4485 kfree_skb(skb);
4486 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487}
4488
Johan Hedberg3119ae92013-03-05 20:37:44 +02004489void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4490{
4491 skb_queue_head_init(&req->cmd_q);
4492 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004493 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004494}
4495
4496int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4497{
4498 struct hci_dev *hdev = req->hdev;
4499 struct sk_buff *skb;
4500 unsigned long flags;
4501
4502 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4503
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004504 /* If an error occurred during request building, remove all HCI
Andre Guedes5d73e032013-03-08 11:20:16 -03004505 * commands queued on the HCI request queue.
4506 */
4507 if (req->err) {
4508 skb_queue_purge(&req->cmd_q);
4509 return req->err;
4510 }
4511
Johan Hedberg3119ae92013-03-05 20:37:44 +02004512 /* Do not allow empty requests */
4513 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004514 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004515
4516 skb = skb_peek_tail(&req->cmd_q);
4517 bt_cb(skb)->req.complete = complete;
4518
4519 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4520 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4521 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4522
4523 queue_work(hdev->workqueue, &hdev->cmd_work);
4524
4525 return 0;
4526}
4527
Marcel Holtmann899de762014-07-11 05:51:58 +02004528bool hci_req_pending(struct hci_dev *hdev)
4529{
4530 return (hdev->req_status == HCI_REQ_PEND);
4531}
4532
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004533static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004534 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004535{
4536 int len = HCI_COMMAND_HDR_SIZE + plen;
4537 struct hci_command_hdr *hdr;
4538 struct sk_buff *skb;
4539
Linus Torvalds1da177e2005-04-16 15:20:36 -07004540 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004541 if (!skb)
4542 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004543
4544 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004545 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546 hdr->plen = plen;
4547
4548 if (plen)
4549 memcpy(skb_put(skb, plen), param, plen);
4550
4551 BT_DBG("skb len %d", skb->len);
4552
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004553 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004554 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004555
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004556 return skb;
4557}
4558
4559/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004560int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4561 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004562{
4563 struct sk_buff *skb;
4564
4565 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4566
4567 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4568 if (!skb) {
4569 BT_ERR("%s no memory for command", hdev->name);
4570 return -ENOMEM;
4571 }
4572
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004573 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004574 * single-command requests.
4575 */
4576 bt_cb(skb)->req.start = true;
4577
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004579 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580
4581 return 0;
4582}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583
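/* e.g. (illustrative): a stand-alone command with no parameters -
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * The req.start flag set above makes hci_req_cmd_complete() treat it
 * as a self-contained single-command request.
 */
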
Johan Hedberg71c76a12013-03-05 20:37:46 +02004584/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004585void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4586 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004587{
4588 struct hci_dev *hdev = req->hdev;
4589 struct sk_buff *skb;
4590
4591 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4592
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004593 /* If an error occurred during request building, there is no point in
Andre Guedes34739c12013-03-08 11:20:18 -03004594 * queueing the HCI command. We can simply return.
4595 */
4596 if (req->err)
4597 return;
4598
Johan Hedberg71c76a12013-03-05 20:37:46 +02004599 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4600 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004601 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4602 hdev->name, opcode);
4603 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004604 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004605 }
4606
4607 if (skb_queue_empty(&req->cmd_q))
4608 bt_cb(skb)->req.start = true;
4609
Johan Hedberg02350a72013-04-03 21:50:29 +03004610 bt_cb(skb)->req.event = event;
4611
Johan Hedberg71c76a12013-03-05 20:37:46 +02004612 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004613}
4614
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004615void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4616 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004617{
4618 hci_req_add_ev(req, opcode, plen, param, 0);
4619}
4620
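/* Usage sketch (assumption): building an asynchronous request. Errors
 * accumulate in req.err, so hci_req_run() can reject a half-built
 * request in one place. The my_* names and the scan value are purely
 * illustrative.
 */
static void my_scan_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s scan setup done, status 0x%2.2x", hdev->name, status);
}

static int my_enable_scans(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_INQUIRY | SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	return hci_req_run(&req, my_scan_complete);
}
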
Linus Torvalds1da177e2005-04-16 15:20:36 -07004621/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004622void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623{
4624 struct hci_command_hdr *hdr;
4625
4626 if (!hdev->sent_cmd)
4627 return NULL;
4628
4629 hdr = (void *) hdev->sent_cmd->data;
4630
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004631 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632 return NULL;
4633
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004634 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635
4636 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4637}
4638
4639/* Send ACL data */
4640static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4641{
4642 struct hci_acl_hdr *hdr;
4643 int len = skb->len;
4644
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004645 skb_push(skb, HCI_ACL_HDR_SIZE);
4646 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004647 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004648 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4649 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004650}
4651
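/* Worked example (illustrative): hci_handle_pack() folds the 12-bit
 * connection handle and the packet boundary/broadcast flags (bits
 * 12-15) into one 16-bit field, so handle 0x002a sent with ACL_START
 * (0x02) goes on the wire as 0x202a.
 */
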
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004652static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004653 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004655 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656 struct hci_dev *hdev = conn->hdev;
4657 struct sk_buff *list;
4658
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004659 skb->len = skb_headlen(skb);
4660 skb->data_len = 0;
4661
4662 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004663
4664 switch (hdev->dev_type) {
4665 case HCI_BREDR:
4666 hci_add_acl_hdr(skb, conn->handle, flags);
4667 break;
4668 case HCI_AMP:
4669 hci_add_acl_hdr(skb, chan->handle, flags);
4670 break;
4671 default:
4672 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4673 return;
4674 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004675
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004676 list = skb_shinfo(skb)->frag_list;
4677 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004678		/* Non-fragmented */
4679 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4680
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004681 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682 } else {
4683 /* Fragmented */
4684 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4685
4686 skb_shinfo(skb)->frag_list = NULL;
4687
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004688		/* Queue all fragments atomically. We need to use spin_lock_bh
4689		 * here because with 6LoWPAN links this function can be called
4690		 * from softirq context, where taking a normal spin lock could
4691		 * deadlock.
4692 */
4693 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004695 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004696
4697 flags &= ~ACL_START;
4698 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699 do {
4700 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004701
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004702 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004703 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004704
4705 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4706
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004707 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708 } while (list);
4709
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004710 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004711 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004712}
4713
4714void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4715{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004716 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004717
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004718 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004719
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004720 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004721
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004722 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004724
4725/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004726void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004727{
4728 struct hci_dev *hdev = conn->hdev;
4729 struct hci_sco_hdr hdr;
4730
4731 BT_DBG("%s len %d", hdev->name, skb->len);
4732
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004733 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734 hdr.dlen = skb->len;
4735
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004736 skb_push(skb, HCI_SCO_HDR_SIZE);
4737 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004738 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004740 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004741
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004743 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745
4746/* ---- HCI TX task (outgoing data) ---- */
4747
4748/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004749static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4750 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751{
4752 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004753 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004754 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004756	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004758
4759 rcu_read_lock();
4760
4761 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004762 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004763 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004764
4765 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4766 continue;
4767
Linus Torvalds1da177e2005-04-16 15:20:36 -07004768 num++;
4769
4770 if (c->sent < min) {
4771 min = c->sent;
4772 conn = c;
4773 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004774
4775 if (hci_conn_num(hdev, type) == num)
4776 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004777 }
4778
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004779 rcu_read_unlock();
4780
Linus Torvalds1da177e2005-04-16 15:20:36 -07004781 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004782 int cnt, q;
4783
4784 switch (conn->type) {
4785 case ACL_LINK:
4786 cnt = hdev->acl_cnt;
4787 break;
4788 case SCO_LINK:
4789 case ESCO_LINK:
4790 cnt = hdev->sco_cnt;
4791 break;
4792 case LE_LINK:
4793 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4794 break;
4795 default:
4796 cnt = 0;
4797 BT_ERR("Unknown link type");
4798 }
4799
4800 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801 *quote = q ? q : 1;
4802 } else
4803 *quote = 0;
4804
4805 BT_DBG("conn %p quote %d", conn, *quote);
4806 return conn;
4807}
4808
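/* Worked example (illustrative): with hdev->acl_cnt == 8 free packet
 * slots and num == 3 ACL connections holding queued data, the chosen
 * connection - the one with the fewest packets in flight - gets a
 * quote of 8 / 3 == 2 packets this round; a zero quotient is rounded
 * up to 1 so the winner always makes progress.
 */
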
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004809static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004810{
4811 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004812 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004813
Ville Tervobae1f5d92011-02-10 22:38:53 -03004814 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004815
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004816 rcu_read_lock();
4817
Linus Torvalds1da177e2005-04-16 15:20:36 -07004818 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004819 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004820 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004821 BT_ERR("%s killing stalled connection %pMR",
4822 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004823 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004824 }
4825 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004826
4827 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004828}
4829
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004830static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4831 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004832{
4833 struct hci_conn_hash *h = &hdev->conn_hash;
4834 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004835 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004836 struct hci_conn *conn;
4837 int cnt, q, conn_num = 0;
4838
4839 BT_DBG("%s", hdev->name);
4840
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004841 rcu_read_lock();
4842
4843 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004844 struct hci_chan *tmp;
4845
4846 if (conn->type != type)
4847 continue;
4848
4849 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4850 continue;
4851
4852 conn_num++;
4853
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004854 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004855 struct sk_buff *skb;
4856
4857 if (skb_queue_empty(&tmp->data_q))
4858 continue;
4859
4860 skb = skb_peek(&tmp->data_q);
4861 if (skb->priority < cur_prio)
4862 continue;
4863
4864 if (skb->priority > cur_prio) {
4865 num = 0;
4866 min = ~0;
4867 cur_prio = skb->priority;
4868 }
4869
4870 num++;
4871
4872 if (conn->sent < min) {
4873 min = conn->sent;
4874 chan = tmp;
4875 }
4876 }
4877
4878 if (hci_conn_num(hdev, type) == conn_num)
4879 break;
4880 }
4881
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004882 rcu_read_unlock();
4883
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004884 if (!chan)
4885 return NULL;
4886
4887 switch (chan->conn->type) {
4888 case ACL_LINK:
4889 cnt = hdev->acl_cnt;
4890 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004891 case AMP_LINK:
4892 cnt = hdev->block_cnt;
4893 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004894 case SCO_LINK:
4895 case ESCO_LINK:
4896 cnt = hdev->sco_cnt;
4897 break;
4898 case LE_LINK:
4899 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4900 break;
4901 default:
4902 cnt = 0;
4903 BT_ERR("Unknown link type");
4904 }
4905
4906 q = cnt / num;
4907 *quote = q ? q : 1;
4908 BT_DBG("chan %p quote %d", chan, *quote);
4909 return chan;
4910}
4911
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004912static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4913{
4914 struct hci_conn_hash *h = &hdev->conn_hash;
4915 struct hci_conn *conn;
4916 int num = 0;
4917
4918 BT_DBG("%s", hdev->name);
4919
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004920 rcu_read_lock();
4921
4922 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004923 struct hci_chan *chan;
4924
4925 if (conn->type != type)
4926 continue;
4927
4928 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4929 continue;
4930
4931 num++;
4932
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004933 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004934 struct sk_buff *skb;
4935
4936 if (chan->sent) {
4937 chan->sent = 0;
4938 continue;
4939 }
4940
4941 if (skb_queue_empty(&chan->data_q))
4942 continue;
4943
4944 skb = skb_peek(&chan->data_q);
4945 if (skb->priority >= HCI_PRIO_MAX - 1)
4946 continue;
4947
4948 skb->priority = HCI_PRIO_MAX - 1;
4949
4950 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004951 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004952 }
4953
4954 if (hci_conn_num(hdev, type) == num)
4955 break;
4956 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004957
4958 rcu_read_unlock();
4959
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004960}
4961
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004962static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4963{
4964 /* Calculate count of blocks used by this packet */
4965 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4966}
4967
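/* e.g. (illustrative): with hdev->block_len == 64, a 150-byte ACL
 * payload (skb->len minus the 4-byte ACL header) occupies
 * DIV_ROUND_UP(150, 64) == 3 controller buffer blocks.
 */
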
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004968static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004970 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004971 /* ACL tx timeout must be longer than maximum
4972 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004973 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004974 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004975 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004977}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004978
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004979static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004980{
4981 unsigned int cnt = hdev->acl_cnt;
4982 struct hci_chan *chan;
4983 struct sk_buff *skb;
4984 int quote;
4985
4986 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004987
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004988 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004989 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004990 u32 priority = (skb_peek(&chan->data_q))->priority;
4991 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004992 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004993 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004994
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004995 /* Stop if priority has changed */
4996 if (skb->priority < priority)
4997 break;
4998
4999 skb = skb_dequeue(&chan->data_q);
5000
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005001 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03005002 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005003
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005004 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005005 hdev->acl_last_tx = jiffies;
5006
5007 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005008 chan->sent++;
5009 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010 }
5011 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005012
5013 if (cnt != hdev->acl_cnt)
5014 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005015}
5016
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005017static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005018{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005019 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005020 struct hci_chan *chan;
5021 struct sk_buff *skb;
5022 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005023 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005024
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005025 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005026
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005027 BT_DBG("%s", hdev->name);
5028
5029 if (hdev->dev_type == HCI_AMP)
5030 type = AMP_LINK;
5031 else
5032 type = ACL_LINK;
5033
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005034 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005035 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005036 u32 priority = (skb_peek(&chan->data_q))->priority;
5037 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5038 int blocks;
5039
5040 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005041 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005042
5043 /* Stop if priority has changed */
5044 if (skb->priority < priority)
5045 break;
5046
5047 skb = skb_dequeue(&chan->data_q);
5048
5049 blocks = __get_blocks(hdev, skb);
5050 if (blocks > hdev->block_cnt)
5051 return;
5052
5053 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005054 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005055
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005056 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005057 hdev->acl_last_tx = jiffies;
5058
5059 hdev->block_cnt -= blocks;
5060 quote -= blocks;
5061
5062 chan->sent += blocks;
5063 chan->conn->sent += blocks;
5064 }
5065 }
5066
5067 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005068 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005069}
5070
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005071static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005072{
5073 BT_DBG("%s", hdev->name);
5074
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005075 /* No ACL link over BR/EDR controller */
5076 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5077 return;
5078
5079 /* No AMP link over AMP controller */
5080 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005081 return;
5082
5083 switch (hdev->flow_ctl_mode) {
5084 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5085 hci_sched_acl_pkt(hdev);
5086 break;
5087
5088 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5089 hci_sched_acl_blk(hdev);
5090 break;
5091 }
5092}
5093
Linus Torvalds1da177e2005-04-16 15:20:36 -07005094/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005095static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005096{
5097 struct hci_conn *conn;
5098 struct sk_buff *skb;
5099 int quote;
5100
5101 BT_DBG("%s", hdev->name);
5102
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005103 if (!hci_conn_num(hdev, SCO_LINK))
5104 return;
5105
Linus Torvalds1da177e2005-04-16 15:20:36 -07005106 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5107 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5108 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005109 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110
5111 conn->sent++;
5112 if (conn->sent == ~0)
5113 conn->sent = 0;
5114 }
5115 }
5116}
5117
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005118static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005119{
5120 struct hci_conn *conn;
5121 struct sk_buff *skb;
5122 int quote;
5123
5124 BT_DBG("%s", hdev->name);
5125
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005126 if (!hci_conn_num(hdev, ESCO_LINK))
5127 return;
5128
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005129 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5130 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005131 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5132 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005133 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005134
5135 conn->sent++;
5136 if (conn->sent == ~0)
5137 conn->sent = 0;
5138 }
5139 }
5140}
5141
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005142static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005143{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005144 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005145 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005146 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005147
5148 BT_DBG("%s", hdev->name);
5149
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005150 if (!hci_conn_num(hdev, LE_LINK))
5151 return;
5152
Marcel Holtmann4a964402014-07-02 19:10:33 +02005153 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005154 /* LE tx timeout must be longer than maximum
5155 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005156 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005157 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005158 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005159 }
5160
5161 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005162 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005163 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005164 u32 priority = (skb_peek(&chan->data_q))->priority;
5165 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005166 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005167 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005168
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005169 /* Stop if priority has changed */
5170 if (skb->priority < priority)
5171 break;
5172
5173 skb = skb_dequeue(&chan->data_q);
5174
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005175 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005176 hdev->le_last_tx = jiffies;
5177
5178 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005179 chan->sent++;
5180 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005181 }
5182 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005183
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005184 if (hdev->le_pkts)
5185 hdev->le_cnt = cnt;
5186 else
5187 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005188
5189 if (cnt != tmp)
5190 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005191}
5192
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005193static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005195 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196 struct sk_buff *skb;
5197
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005198 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005199 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005200
Marcel Holtmann52de5992013-09-03 18:08:38 -07005201 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5202 /* Schedule queues and send stuff to HCI driver */
5203 hci_sched_acl(hdev);
5204 hci_sched_sco(hdev);
5205 hci_sched_esco(hdev);
5206 hci_sched_le(hdev);
5207 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005208
Linus Torvalds1da177e2005-04-16 15:20:36 -07005209 /* Send next queued raw (unknown type) packet */
5210 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005211 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212}
5213
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005214/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215
5216/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005217static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005218{
5219 struct hci_acl_hdr *hdr = (void *) skb->data;
5220 struct hci_conn *conn;
5221 __u16 handle, flags;
5222
5223 skb_pull(skb, HCI_ACL_HDR_SIZE);
5224
5225 handle = __le16_to_cpu(hdr->handle);
5226 flags = hci_flags(handle);
5227 handle = hci_handle(handle);
5228
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005229 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005230 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231
5232 hdev->stat.acl_rx++;
5233
5234 hci_dev_lock(hdev);
5235 conn = hci_conn_hash_lookup_handle(hdev, handle);
5236 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005237
Linus Torvalds1da177e2005-04-16 15:20:36 -07005238 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005239 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005240
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005242 l2cap_recv_acldata(conn, skb, flags);
5243 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005245 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005246 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005247 }
5248
5249 kfree_skb(skb);
5250}
5251
5252/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005253static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005254{
5255 struct hci_sco_hdr *hdr = (void *) skb->data;
5256 struct hci_conn *conn;
5257 __u16 handle;
5258
5259 skb_pull(skb, HCI_SCO_HDR_SIZE);
5260
5261 handle = __le16_to_cpu(hdr->handle);
5262
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005263 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005264
5265 hdev->stat.sco_rx++;
5266
5267 hci_dev_lock(hdev);
5268 conn = hci_conn_hash_lookup_handle(hdev, handle);
5269 hci_dev_unlock(hdev);
5270
5271 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005273 sco_recv_scodata(conn, skb);
5274 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005276 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005277 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005278 }
5279
5280 kfree_skb(skb);
5281}
5282
Johan Hedberg9238f362013-03-05 20:37:48 +02005283static bool hci_req_is_complete(struct hci_dev *hdev)
5284{
5285 struct sk_buff *skb;
5286
5287 skb = skb_peek(&hdev->cmd_q);
5288 if (!skb)
5289 return true;
5290
5291 return bt_cb(skb)->req.start;
5292}
5293
Johan Hedberg42c6b122013-03-05 20:37:49 +02005294static void hci_resend_last(struct hci_dev *hdev)
5295{
5296 struct hci_command_hdr *sent;
5297 struct sk_buff *skb;
5298 u16 opcode;
5299
5300 if (!hdev->sent_cmd)
5301 return;
5302
5303 sent = (void *) hdev->sent_cmd->data;
5304 opcode = __le16_to_cpu(sent->opcode);
5305 if (opcode == HCI_OP_RESET)
5306 return;
5307
5308 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5309 if (!skb)
5310 return;
5311
5312 skb_queue_head(&hdev->cmd_q, skb);
5313 queue_work(hdev->workqueue, &hdev->cmd_work);
5314}
5315
Johan Hedberg9238f362013-03-05 20:37:48 +02005316void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5317{
5318 hci_req_complete_t req_complete = NULL;
5319 struct sk_buff *skb;
5320 unsigned long flags;
5321
5322 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5323
Johan Hedberg42c6b122013-03-05 20:37:49 +02005324 /* If the completed command doesn't match the last one that was
5325 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005326 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005327 if (!hci_sent_cmd_data(hdev, opcode)) {
5328 /* Some CSR based controllers generate a spontaneous
5329 * reset complete event during init and any pending
5330 * command will never be completed. In such a case we
5331 * need to resend whatever was the last sent
5332 * command.
5333 */
5334 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5335 hci_resend_last(hdev);
5336
Johan Hedberg9238f362013-03-05 20:37:48 +02005337 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005338 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005339
5340 /* If the command succeeded and there's still more commands in
5341 * this request the request is not yet complete.
5342 */
5343 if (!status && !hci_req_is_complete(hdev))
5344 return;
5345
5346 /* If this was the last command in a request the complete
5347 * callback would be found in hdev->sent_cmd instead of the
5348 * command queue (hdev->cmd_q).
5349 */
5350 if (hdev->sent_cmd) {
5351 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005352
5353 if (req_complete) {
5354 /* We must set the complete callback to NULL to
5355 * avoid calling the callback more than once if
5356 * this function gets called again.
5357 */
5358 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5359
Johan Hedberg9238f362013-03-05 20:37:48 +02005360 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005361 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005362 }
5363
5364 /* Remove all pending commands belonging to this request */
5365 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5366 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5367 if (bt_cb(skb)->req.start) {
5368 __skb_queue_head(&hdev->cmd_q, skb);
5369 break;
5370 }
5371
5372 req_complete = bt_cb(skb)->req.complete;
5373 kfree_skb(skb);
5374 }
5375 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5376
5377call_complete:
5378 if (req_complete)
5379 req_complete(hdev, status);
5380}
5381
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005382static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005383{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005384 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385 struct sk_buff *skb;
5386
5387 BT_DBG("%s", hdev->name);
5388
Linus Torvalds1da177e2005-04-16 15:20:36 -07005389 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005390 /* Send copy to monitor */
5391 hci_send_to_monitor(hdev, skb);
5392
Linus Torvalds1da177e2005-04-16 15:20:36 -07005393 if (atomic_read(&hdev->promisc)) {
5394 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005395 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005396 }
5397
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005398 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005399 kfree_skb(skb);
5400 continue;
5401 }
5402
5403 if (test_bit(HCI_INIT, &hdev->flags)) {
5404			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005405 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005406 case HCI_ACLDATA_PKT:
5407 case HCI_SCODATA_PKT:
5408 kfree_skb(skb);
5409 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005410 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005411 }
5412
5413 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005414 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005416 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005417 hci_event_packet(hdev, skb);
5418 break;
5419
5420 case HCI_ACLDATA_PKT:
5421 BT_DBG("%s ACL data packet", hdev->name);
5422 hci_acldata_packet(hdev, skb);
5423 break;
5424
5425 case HCI_SCODATA_PKT:
5426 BT_DBG("%s SCO data packet", hdev->name);
5427 hci_scodata_packet(hdev, skb);
5428 break;
5429
5430 default:
5431 kfree_skb(skb);
5432 break;
5433 }
5434 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435}
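
/* Sketch of the producer side for hdev->rx_q, assuming the usual driver
 * entry point hci_recv_frame() defined earlier in this file: the
 * transport driver tags the skb with its packet type and hands it to
 * the core, which queues it and schedules hci_rx_work(). The function
 * below is hypothetical.
 */
static int example_transport_rx(struct hci_dev *hdev, struct sk_buff *skb)
{
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	/* assumed: an event frame */
	return hci_recv_frame(hdev, skb);
}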
5436
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005437static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005439 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440 struct sk_buff *skb;
5441
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005442 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5443 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005444
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005446 if (atomic_read(&hdev->cmd_cnt)) {
5447 skb = skb_dequeue(&hdev->cmd_q);
5448 if (!skb)
5449 return;
5450
Wei Yongjun7585b972009-02-25 18:29:52 +08005451 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005452
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005453 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005454 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005455 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005456 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005457 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005458 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005459 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005460 schedule_delayed_work(&hdev->cmd_timer,
5461 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005462 } else {
5463 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005464 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465 }
5466 }
5467}
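
/* Producer-side sketch: hci_send_cmd() (earlier in this file) builds
 * the command skb, queues it on hdev->cmd_q and schedules
 * hci_cmd_work() above; the command only goes on the wire once a flow
 * control credit (hdev->cmd_cnt) is available. The wrapper below is
 * hypothetical.
 */
static void example_queue_reset(struct hci_dev *hdev)
{
	/* HCI_Reset takes no parameters. */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}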
Andre Guedesb1efcc22014-02-26 20:21:40 -03005468
5469void hci_req_add_le_scan_disable(struct hci_request *req)
5470{
5471 struct hci_cp_le_set_scan_enable cp;
5472
5473 memset(&cp, 0, sizeof(cp));
5474 cp.enable = LE_SCAN_DISABLE;
5475 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5476}
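
/* Usage sketch: this helper is meant to be batched into a request
 * together with other commands, as hci_update_background_scan() below
 * does. A standalone use, with a hypothetical wrapper name, would look
 * like this:
 */
static void example_stop_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);
	hci_req_run(&req, NULL);	/* no completion callback needed */
}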
Andre Guedesa4790db2014-02-26 20:21:47 -03005477
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005478static void add_to_white_list(struct hci_request *req,
5479 struct hci_conn_params *params)
5480{
5481 struct hci_cp_le_add_to_white_list cp;
5482
5483 cp.bdaddr_type = params->addr_type;
5484 bacpy(&cp.bdaddr, &params->addr);
5485
5486 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5487}
5488
5489static u8 update_white_list(struct hci_request *req)
5490{
5491 struct hci_dev *hdev = req->hdev;
5492 struct hci_conn_params *params;
5493 struct bdaddr_list *b;
5494 uint8_t white_list_entries = 0;
5495
5496 /* Go through the current white list programmed into the
5497 * controller one by one and check if that address is still
5498	 * in the list of pending connections or the list of devices to
5499 * report. If not present in either list, then queue the
5500 * command to remove it from the controller.
5501 */
5502 list_for_each_entry(b, &hdev->le_white_list, list) {
5503 struct hci_cp_le_del_from_white_list cp;
5504
5505 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5506 &b->bdaddr, b->bdaddr_type) ||
5507 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5508 &b->bdaddr, b->bdaddr_type)) {
5509 white_list_entries++;
5510 continue;
5511 }
5512
5513 cp.bdaddr_type = b->bdaddr_type;
5514 bacpy(&cp.bdaddr, &b->bdaddr);
5515
5516 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5517 sizeof(cp), &cp);
5518 }
5519
5520	/* Since all no-longer-valid white list entries have been
5521	 * removed, walk through the list of pending connections
5522	 * and ensure that any new device gets programmed into
5523	 * the controller.
5524	 *
5525	 * If the list of devices is larger than the number of
5526	 * available white list entries in the controller, then
5527	 * just abort and return the filter policy value that does
5528	 * not use the white list.
5529 */
5530 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5531 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5532 &params->addr, params->addr_type))
5533 continue;
5534
5535 if (white_list_entries >= hdev->le_white_list_size) {
5536 /* Select filter policy to accept all advertising */
5537 return 0x00;
5538 }
5539
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005540 if (hci_find_irk_by_addr(hdev, &params->addr,
5541 params->addr_type)) {
5542			/* White list cannot be used with RPAs */
5543 return 0x00;
5544 }
5545
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005546 white_list_entries++;
5547 add_to_white_list(req, params);
5548 }
5549
5550 /* After adding all new pending connections, walk through
5551 * the list of pending reports and also add these to the
5552 * white list if there is still space.
5553 */
5554 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5555 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5556 &params->addr, params->addr_type))
5557 continue;
5558
5559 if (white_list_entries >= hdev->le_white_list_size) {
5560 /* Select filter policy to accept all advertising */
5561 return 0x00;
5562 }
5563
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005564 if (hci_find_irk_by_addr(hdev, &params->addr,
5565 params->addr_type)) {
5566			/* White list cannot be used with RPAs */
5567 return 0x00;
5568 }
5569
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005570 white_list_entries++;
5571 add_to_white_list(req, params);
5572 }
5573
5574 /* Select filter policy to use white list */
5575 return 0x01;
5576}
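
/* Worked example with hypothetical numbers: if the controller reports
 * le_white_list_size = 8 and there are 10 devices spread across
 * pend_le_conns and pend_le_reports, the 9th addition would exceed the
 * controller limit, so update_white_list() gives up and returns 0x00
 * (accept all advertising). Only when every device fits, and none of
 * them uses an RPA, does it return 0x01 so that scanning can be
 * restricted to the white list.
 */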
5577
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005578void hci_req_add_le_passive_scan(struct hci_request *req)
5579{
5580 struct hci_cp_le_set_scan_param param_cp;
5581 struct hci_cp_le_set_scan_enable enable_cp;
5582 struct hci_dev *hdev = req->hdev;
5583 u8 own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005584 u8 filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005585
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005586	/* Set require_privacy to false since no SCAN_REQ is sent
5587 * during passive scanning. Not using an unresolvable address
5588 * here is important so that peer devices using direct
5589 * advertising with our address will be correctly reported
5590 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005591 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005592 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005593 return;
5594
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005595 /* Adding or removing entries from the white list must
5596 * happen before enabling scanning. The controller does
5597 * not allow white list modification while scanning.
5598 */
5599 filter_policy = update_white_list(req);
5600
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005601 memset(&param_cp, 0, sizeof(param_cp));
5602 param_cp.type = LE_SCAN_PASSIVE;
5603 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5604 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5605 param_cp.own_address_type = own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005606 param_cp.filter_policy = filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005607 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5608 &param_cp);
5609
5610 memset(&enable_cp, 0, sizeof(enable_cp));
5611 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005612 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005613 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5614 &enable_cp);
5615}
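
/* Units sketch: scan interval and window are in 0.625 ms steps.
 * Assuming the usual defaults of interval 0x0060 and window 0x0030,
 * the controller scans for 30 ms out of every 60 ms:
 *
 *	0x0060 * 0.625 ms = 96 * 0.625 ms = 60 ms (interval)
 *	0x0030 * 0.625 ms = 48 * 0.625 ms = 30 ms (window)
 */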
5616
Andre Guedesa4790db2014-02-26 20:21:47 -03005617static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5618{
5619 if (status)
5620 BT_DBG("HCI request failed to update background scanning: "
5621 "status 0x%2.2x", status);
5622}
5623
5624/* This function controls the background scanning based on hdev->pend_le_conns
5625 * list. If there are pending LE connections, we start the background scanning,
5626 * otherwise we stop it.
5627 *
5628 * This function requires that the caller hold hdev->lock.
5629 */
5630void hci_update_background_scan(struct hci_dev *hdev)
5631{
Andre Guedesa4790db2014-02-26 20:21:47 -03005632 struct hci_request req;
5633 struct hci_conn *conn;
5634 int err;
5635
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005636 if (!test_bit(HCI_UP, &hdev->flags) ||
5637 test_bit(HCI_INIT, &hdev->flags) ||
5638 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02005639 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005640 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005641 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005642 return;
5643
Johan Hedberga70f4b52014-07-07 15:19:50 +03005644 /* No point in doing scanning if LE support hasn't been enabled */
5645 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5646 return;
5647
Johan Hedbergae23ada2014-07-07 13:24:59 +03005648 /* If discovery is active don't interfere with it */
5649 if (hdev->discovery.state != DISCOVERY_STOPPED)
5650 return;
5651
Andre Guedesa4790db2014-02-26 20:21:47 -03005652 hci_req_init(&req, hdev);
5653
Johan Hedbergd1d588c2014-07-20 17:10:45 +03005654 if (list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005655 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005656		/* If there are no pending LE connections or devices
5657 * to be scanned for, we should stop the background
5658 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005659 */
5660
5661 /* If controller is not scanning we are done. */
5662 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5663 return;
5664
5665 hci_req_add_le_scan_disable(&req);
5666
5667 BT_DBG("%s stopping background scanning", hdev->name);
5668 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005669 /* If there is at least one pending LE connection, we should
5670 * keep the background scan running.
5671 */
5672
Andre Guedesa4790db2014-02-26 20:21:47 -03005673 /* If controller is connecting, we should not start scanning
5674 * since some controllers are not able to scan and connect at
5675 * the same time.
5676 */
5677 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5678 if (conn)
5679 return;
5680
Andre Guedes4340a122014-03-10 18:26:24 -03005681 /* If controller is currently scanning, we stop it to ensure we
5682		 * don't miss any advertising (due to the duplicates filter).
5683 */
5684 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5685 hci_req_add_le_scan_disable(&req);
5686
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005687 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005688
5689 BT_DBG("%s starting background scanning", hdev->name);
5690 }
5691
5692 err = hci_req_run(&req, update_background_scan_complete);
5693 if (err)
5694 BT_ERR("Failed to run HCI request: err %d", err);
5695}
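
/* Call-site sketch: as the comment above notes, hdev->lock must be
 * held when calling hci_update_background_scan(). A typical caller
 * (hypothetical wrapper) therefore looks like this:
 */
static void example_trigger_background_scan(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}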
Johan Hedberg432df052014-08-01 11:13:31 +03005696
Johan Hedberg22f433d2014-08-01 11:13:32 +03005697static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5698{
5699 struct bdaddr_list *b;
5700
5701 list_for_each_entry(b, &hdev->whitelist, list) {
5702 struct hci_conn *conn;
5703
5704 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5705 if (!conn)
5706 return true;
5707
5708 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5709 return true;
5710 }
5711
5712 return false;
5713}
5714
Johan Hedberg432df052014-08-01 11:13:31 +03005715void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5716{
5717 u8 scan;
5718
5719 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5720 return;
5721
5722 if (!hdev_is_powered(hdev))
5723 return;
5724
5725 if (mgmt_powering_down(hdev))
5726 return;
5727
5728 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
Johan Hedberg22f433d2014-08-01 11:13:32 +03005729 disconnected_whitelist_entries(hdev))
Johan Hedberg432df052014-08-01 11:13:31 +03005730 scan = SCAN_PAGE;
5731 else
5732 scan = SCAN_DISABLED;
5733
5734 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5735 return;
5736
5737 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5738 scan |= SCAN_INQUIRY;
5739
5740 if (req)
5741 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5742 else
5743 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5744}
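
/* Usage sketch for the req/NULL split above: pass an existing request
 * to batch the scan-enable write with other commands, or NULL to send
 * it immediately. The batched variant, with a hypothetical wrapper
 * name, would look like this:
 */
static void example_update_page_scan_batched(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_update_page_scan(hdev, &req);
	hci_req_run(&req, NULL);
}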