/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg970c4e42014-02-18 10:19:33 +020040#include "smp.h"
41
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize HCI request processing on a device. The macro argument is
 * parenthesized so any pointer-valued expression expands correctly.
 */
#define hci_req_lock(d)		mutex_lock(&(d)->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&(d)->req_lock)
65
Linus Torvalds1da177e2005-04-16 15:20:36 -070066/* ---- HCI notifications ---- */
67
/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
72
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070073/* ---- HCI debugfs entries ---- */
74
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070075static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
77{
78 struct hci_dev *hdev = file->private_data;
79 char buf[3];
80
Marcel Holtmann111902f2014-06-21 04:53:17 +020081 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070082 buf[1] = '\n';
83 buf[2] = '\0';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85}
86
87static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
89{
90 struct hci_dev *hdev = file->private_data;
91 struct sk_buff *skb;
92 char buf[32];
93 size_t buf_size = min(count, (sizeof(buf)-1));
94 bool enable;
95 int err;
96
97 if (!test_bit(HCI_UP, &hdev->flags))
98 return -ENETDOWN;
99
100 if (copy_from_user(buf, user_buf, buf_size))
101 return -EFAULT;
102
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
105 return -EINVAL;
106
Marcel Holtmann111902f2014-06-21 04:53:17 +0200107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700108 return -EALREADY;
109
110 hci_req_lock(hdev);
111 if (enable)
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113 HCI_CMD_TIMEOUT);
114 else
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116 HCI_CMD_TIMEOUT);
117 hci_req_unlock(hdev);
118
119 if (IS_ERR(skb))
120 return PTR_ERR(skb);
121
122 err = -bt_to_errno(skb->data[0]);
123 kfree_skb(skb);
124
125 if (err < 0)
126 return err;
127
Marcel Holtmann111902f2014-06-21 04:53:17 +0200128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700129
130 return count;
131}
132
133static const struct file_operations dut_mode_fops = {
134 .open = simple_open,
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
138};
139
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700140static int features_show(struct seq_file *f, void *ptr)
141{
142 struct hci_dev *hdev = f->private;
143 u8 p;
144
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
153 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700161 hci_dev_unlock(hdev);
162
163 return 0;
164}
165
166static int features_open(struct inode *inode, struct file *file)
167{
168 return single_open(file, features_show, inode->i_private);
169}
170
171static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
176};
177
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700178static int blacklist_show(struct seq_file *f, void *p)
179{
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
182
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700186 hci_dev_unlock(hdev);
187
188 return 0;
189}
190
191static int blacklist_open(struct inode *inode, struct file *file)
192{
193 return single_open(file, blacklist_show, inode->i_private);
194}
195
196static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
Marcel Holtmann47219832013-10-17 17:24:15 -0700203static int uuids_show(struct seq_file *f, void *p)
204{
205 struct hci_dev *hdev = f->private;
206 struct bt_uuid *uuid;
207
208 hci_dev_lock(hdev);
209 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700210 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700211
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
215 */
216 for (i = 0; i < 16; i++)
217 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700218
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700219 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700220 }
221 hci_dev_unlock(hdev);
222
223 return 0;
224}
225
226static int uuids_open(struct inode *inode, struct file *file)
227{
228 return single_open(file, uuids_show, inode->i_private);
229}
230
231static const struct file_operations uuids_fops = {
232 .open = uuids_open,
233 .read = seq_read,
234 .llseek = seq_lseek,
235 .release = single_release,
236};
237
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700238static int inquiry_cache_show(struct seq_file *f, void *p)
239{
240 struct hci_dev *hdev = f->private;
241 struct discovery_state *cache = &hdev->discovery;
242 struct inquiry_entry *e;
243
244 hci_dev_lock(hdev);
245
246 list_for_each_entry(e, &cache->all, all) {
247 struct inquiry_data *data = &e->data;
248 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
249 &data->bdaddr,
250 data->pscan_rep_mode, data->pscan_period_mode,
251 data->pscan_mode, data->dev_class[2],
252 data->dev_class[1], data->dev_class[0],
253 __le16_to_cpu(data->clock_offset),
254 data->rssi, data->ssp_mode, e->timestamp);
255 }
256
257 hci_dev_unlock(hdev);
258
259 return 0;
260}
261
262static int inquiry_cache_open(struct inode *inode, struct file *file)
263{
264 return single_open(file, inquiry_cache_show, inode->i_private);
265}
266
267static const struct file_operations inquiry_cache_fops = {
268 .open = inquiry_cache_open,
269 .read = seq_read,
270 .llseek = seq_lseek,
271 .release = single_release,
272};
273
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700274static int link_keys_show(struct seq_file *f, void *ptr)
275{
276 struct hci_dev *hdev = f->private;
Johan Hedberg0378b592014-11-19 15:22:22 +0200277 struct link_key *key;
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700278
Johan Hedberg0378b592014-11-19 15:22:22 +0200279 rcu_read_lock();
280 list_for_each_entry_rcu(key, &hdev->link_keys, list)
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700281 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
282 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
Johan Hedberg0378b592014-11-19 15:22:22 +0200283 rcu_read_unlock();
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700284
285 return 0;
286}
287
288static int link_keys_open(struct inode *inode, struct file *file)
289{
290 return single_open(file, link_keys_show, inode->i_private);
291}
292
293static const struct file_operations link_keys_fops = {
294 .open = link_keys_open,
295 .read = seq_read,
296 .llseek = seq_lseek,
297 .release = single_release,
298};
299
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700300static int dev_class_show(struct seq_file *f, void *ptr)
301{
302 struct hci_dev *hdev = f->private;
303
304 hci_dev_lock(hdev);
305 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
306 hdev->dev_class[1], hdev->dev_class[0]);
307 hci_dev_unlock(hdev);
308
309 return 0;
310}
311
312static int dev_class_open(struct inode *inode, struct file *file)
313{
314 return single_open(file, dev_class_show, inode->i_private);
315}
316
317static const struct file_operations dev_class_fops = {
318 .open = dev_class_open,
319 .read = seq_read,
320 .llseek = seq_lseek,
321 .release = single_release,
322};
323
Marcel Holtmann041000b2013-10-17 12:02:31 -0700324static int voice_setting_get(void *data, u64 *val)
325{
326 struct hci_dev *hdev = data;
327
328 hci_dev_lock(hdev);
329 *val = hdev->voice_setting;
330 hci_dev_unlock(hdev);
331
332 return 0;
333}
334
335DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
336 NULL, "0x%4.4llx\n");
337
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700338static int auto_accept_delay_set(void *data, u64 val)
339{
340 struct hci_dev *hdev = data;
341
342 hci_dev_lock(hdev);
343 hdev->auto_accept_delay = val;
344 hci_dev_unlock(hdev);
345
346 return 0;
347}
348
349static int auto_accept_delay_get(void *data, u64 *val)
350{
351 struct hci_dev *hdev = data;
352
353 hci_dev_lock(hdev);
354 *val = hdev->auto_accept_delay;
355 hci_dev_unlock(hdev);
356
357 return 0;
358}
359
360DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
361 auto_accept_delay_set, "%llu\n");
362
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800363static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
364 size_t count, loff_t *ppos)
365{
366 struct hci_dev *hdev = file->private_data;
367 char buf[3];
368
Marcel Holtmann111902f2014-06-21 04:53:17 +0200369 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800370 buf[1] = '\n';
371 buf[2] = '\0';
372 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
373}
374
375static ssize_t force_sc_support_write(struct file *file,
376 const char __user *user_buf,
377 size_t count, loff_t *ppos)
378{
379 struct hci_dev *hdev = file->private_data;
380 char buf[32];
381 size_t buf_size = min(count, (sizeof(buf)-1));
382 bool enable;
383
384 if (test_bit(HCI_UP, &hdev->flags))
385 return -EBUSY;
386
387 if (copy_from_user(buf, user_buf, buf_size))
388 return -EFAULT;
389
390 buf[buf_size] = '\0';
391 if (strtobool(buf, &enable))
392 return -EINVAL;
393
Marcel Holtmann111902f2014-06-21 04:53:17 +0200394 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800395 return -EALREADY;
396
Marcel Holtmann111902f2014-06-21 04:53:17 +0200397 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800398
399 return count;
400}
401
402static const struct file_operations force_sc_support_fops = {
403 .open = simple_open,
404 .read = force_sc_support_read,
405 .write = force_sc_support_write,
406 .llseek = default_llseek,
407};
408
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800409static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
410 size_t count, loff_t *ppos)
411{
412 struct hci_dev *hdev = file->private_data;
413 char buf[3];
414
415 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
416 buf[1] = '\n';
417 buf[2] = '\0';
418 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
419}
420
421static const struct file_operations sc_only_mode_fops = {
422 .open = simple_open,
423 .read = sc_only_mode_read,
424 .llseek = default_llseek,
425};
426
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700427static int idle_timeout_set(void *data, u64 val)
428{
429 struct hci_dev *hdev = data;
430
431 if (val != 0 && (val < 500 || val > 3600000))
432 return -EINVAL;
433
434 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700435 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700436 hci_dev_unlock(hdev);
437
438 return 0;
439}
440
441static int idle_timeout_get(void *data, u64 *val)
442{
443 struct hci_dev *hdev = data;
444
445 hci_dev_lock(hdev);
446 *val = hdev->idle_timeout;
447 hci_dev_unlock(hdev);
448
449 return 0;
450}
451
452DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
453 idle_timeout_set, "%llu\n");
454
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200455static int rpa_timeout_set(void *data, u64 val)
456{
457 struct hci_dev *hdev = data;
458
459 /* Require the RPA timeout to be at least 30 seconds and at most
460 * 24 hours.
461 */
462 if (val < 30 || val > (60 * 60 * 24))
463 return -EINVAL;
464
465 hci_dev_lock(hdev);
466 hdev->rpa_timeout = val;
467 hci_dev_unlock(hdev);
468
469 return 0;
470}
471
472static int rpa_timeout_get(void *data, u64 *val)
473{
474 struct hci_dev *hdev = data;
475
476 hci_dev_lock(hdev);
477 *val = hdev->rpa_timeout;
478 hci_dev_unlock(hdev);
479
480 return 0;
481}
482
483DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
484 rpa_timeout_set, "%llu\n");
485
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700486static int sniff_min_interval_set(void *data, u64 val)
487{
488 struct hci_dev *hdev = data;
489
490 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
491 return -EINVAL;
492
493 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700494 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700495 hci_dev_unlock(hdev);
496
497 return 0;
498}
499
500static int sniff_min_interval_get(void *data, u64 *val)
501{
502 struct hci_dev *hdev = data;
503
504 hci_dev_lock(hdev);
505 *val = hdev->sniff_min_interval;
506 hci_dev_unlock(hdev);
507
508 return 0;
509}
510
511DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
512 sniff_min_interval_set, "%llu\n");
513
514static int sniff_max_interval_set(void *data, u64 val)
515{
516 struct hci_dev *hdev = data;
517
518 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
519 return -EINVAL;
520
521 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700522 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700523 hci_dev_unlock(hdev);
524
525 return 0;
526}
527
528static int sniff_max_interval_get(void *data, u64 *val)
529{
530 struct hci_dev *hdev = data;
531
532 hci_dev_lock(hdev);
533 *val = hdev->sniff_max_interval;
534 hci_dev_unlock(hdev);
535
536 return 0;
537}
538
539DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
540 sniff_max_interval_set, "%llu\n");
541
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200542static int conn_info_min_age_set(void *data, u64 val)
543{
544 struct hci_dev *hdev = data;
545
546 if (val == 0 || val > hdev->conn_info_max_age)
547 return -EINVAL;
548
549 hci_dev_lock(hdev);
550 hdev->conn_info_min_age = val;
551 hci_dev_unlock(hdev);
552
553 return 0;
554}
555
556static int conn_info_min_age_get(void *data, u64 *val)
557{
558 struct hci_dev *hdev = data;
559
560 hci_dev_lock(hdev);
561 *val = hdev->conn_info_min_age;
562 hci_dev_unlock(hdev);
563
564 return 0;
565}
566
567DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
568 conn_info_min_age_set, "%llu\n");
569
570static int conn_info_max_age_set(void *data, u64 val)
571{
572 struct hci_dev *hdev = data;
573
574 if (val == 0 || val < hdev->conn_info_min_age)
575 return -EINVAL;
576
577 hci_dev_lock(hdev);
578 hdev->conn_info_max_age = val;
579 hci_dev_unlock(hdev);
580
581 return 0;
582}
583
584static int conn_info_max_age_get(void *data, u64 *val)
585{
586 struct hci_dev *hdev = data;
587
588 hci_dev_lock(hdev);
589 *val = hdev->conn_info_max_age;
590 hci_dev_unlock(hdev);
591
592 return 0;
593}
594
595DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
596 conn_info_max_age_set, "%llu\n");
597
Marcel Holtmannac345812014-02-23 12:44:25 -0800598static int identity_show(struct seq_file *f, void *p)
599{
600 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200601 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800602 u8 addr_type;
603
604 hci_dev_lock(hdev);
605
Johan Hedberga1f4c312014-02-27 14:05:41 +0200606 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800607
Johan Hedberga1f4c312014-02-27 14:05:41 +0200608 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800609 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800610
611 hci_dev_unlock(hdev);
612
613 return 0;
614}
615
616static int identity_open(struct inode *inode, struct file *file)
617{
618 return single_open(file, identity_show, inode->i_private);
619}
620
621static const struct file_operations identity_fops = {
622 .open = identity_open,
623 .read = seq_read,
624 .llseek = seq_lseek,
625 .release = single_release,
626};
627
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800628static int random_address_show(struct seq_file *f, void *p)
629{
630 struct hci_dev *hdev = f->private;
631
632 hci_dev_lock(hdev);
633 seq_printf(f, "%pMR\n", &hdev->random_addr);
634 hci_dev_unlock(hdev);
635
636 return 0;
637}
638
639static int random_address_open(struct inode *inode, struct file *file)
640{
641 return single_open(file, random_address_show, inode->i_private);
642}
643
644static const struct file_operations random_address_fops = {
645 .open = random_address_open,
646 .read = seq_read,
647 .llseek = seq_lseek,
648 .release = single_release,
649};
650
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700651static int static_address_show(struct seq_file *f, void *p)
652{
653 struct hci_dev *hdev = f->private;
654
655 hci_dev_lock(hdev);
656 seq_printf(f, "%pMR\n", &hdev->static_addr);
657 hci_dev_unlock(hdev);
658
659 return 0;
660}
661
662static int static_address_open(struct inode *inode, struct file *file)
663{
664 return single_open(file, static_address_show, inode->i_private);
665}
666
667static const struct file_operations static_address_fops = {
668 .open = static_address_open,
669 .read = seq_read,
670 .llseek = seq_lseek,
671 .release = single_release,
672};
673
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800674static ssize_t force_static_address_read(struct file *file,
675 char __user *user_buf,
676 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700677{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800678 struct hci_dev *hdev = file->private_data;
679 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700680
Marcel Holtmann111902f2014-06-21 04:53:17 +0200681 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800682 buf[1] = '\n';
683 buf[2] = '\0';
684 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
685}
686
687static ssize_t force_static_address_write(struct file *file,
688 const char __user *user_buf,
689 size_t count, loff_t *ppos)
690{
691 struct hci_dev *hdev = file->private_data;
692 char buf[32];
693 size_t buf_size = min(count, (sizeof(buf)-1));
694 bool enable;
695
696 if (test_bit(HCI_UP, &hdev->flags))
697 return -EBUSY;
698
699 if (copy_from_user(buf, user_buf, buf_size))
700 return -EFAULT;
701
702 buf[buf_size] = '\0';
703 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700704 return -EINVAL;
705
Marcel Holtmann111902f2014-06-21 04:53:17 +0200706 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800707 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700708
Marcel Holtmann111902f2014-06-21 04:53:17 +0200709 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800710
711 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700712}
713
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800714static const struct file_operations force_static_address_fops = {
715 .open = simple_open,
716 .read = force_static_address_read,
717 .write = force_static_address_write,
718 .llseek = default_llseek,
719};
Marcel Holtmann92202182013-10-18 16:38:10 -0700720
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800721static int white_list_show(struct seq_file *f, void *ptr)
722{
723 struct hci_dev *hdev = f->private;
724 struct bdaddr_list *b;
725
726 hci_dev_lock(hdev);
727 list_for_each_entry(b, &hdev->le_white_list, list)
728 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
729 hci_dev_unlock(hdev);
730
731 return 0;
732}
733
734static int white_list_open(struct inode *inode, struct file *file)
735{
736 return single_open(file, white_list_show, inode->i_private);
737}
738
739static const struct file_operations white_list_fops = {
740 .open = white_list_open,
741 .read = seq_read,
742 .llseek = seq_lseek,
743 .release = single_release,
744};
745
Marcel Holtmann3698d702014-02-18 21:54:49 -0800746static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
747{
748 struct hci_dev *hdev = f->private;
Johan Hedbergadae20c2014-11-13 14:37:48 +0200749 struct smp_irk *irk;
Marcel Holtmann3698d702014-02-18 21:54:49 -0800750
Johan Hedbergadae20c2014-11-13 14:37:48 +0200751 rcu_read_lock();
752 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Marcel Holtmann3698d702014-02-18 21:54:49 -0800753 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
754 &irk->bdaddr, irk->addr_type,
755 16, irk->val, &irk->rpa);
756 }
Johan Hedbergadae20c2014-11-13 14:37:48 +0200757 rcu_read_unlock();
Marcel Holtmann3698d702014-02-18 21:54:49 -0800758
759 return 0;
760}
761
762static int identity_resolving_keys_open(struct inode *inode, struct file *file)
763{
764 return single_open(file, identity_resolving_keys_show,
765 inode->i_private);
766}
767
768static const struct file_operations identity_resolving_keys_fops = {
769 .open = identity_resolving_keys_open,
770 .read = seq_read,
771 .llseek = seq_lseek,
772 .release = single_release,
773};
774
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700775static int long_term_keys_show(struct seq_file *f, void *ptr)
776{
777 struct hci_dev *hdev = f->private;
Johan Hedberg970d0f12014-11-13 14:37:47 +0200778 struct smp_ltk *ltk;
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700779
Johan Hedberg970d0f12014-11-13 14:37:47 +0200780 rcu_read_lock();
781 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800782 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700783 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
784 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800785 __le64_to_cpu(ltk->rand), 16, ltk->val);
Johan Hedberg970d0f12014-11-13 14:37:47 +0200786 rcu_read_unlock();
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700787
788 return 0;
789}
790
791static int long_term_keys_open(struct inode *inode, struct file *file)
792{
793 return single_open(file, long_term_keys_show, inode->i_private);
794}
795
796static const struct file_operations long_term_keys_fops = {
797 .open = long_term_keys_open,
798 .read = seq_read,
799 .llseek = seq_lseek,
800 .release = single_release,
801};
802
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700803static int conn_min_interval_set(void *data, u64 val)
804{
805 struct hci_dev *hdev = data;
806
807 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
808 return -EINVAL;
809
810 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700811 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700812 hci_dev_unlock(hdev);
813
814 return 0;
815}
816
817static int conn_min_interval_get(void *data, u64 *val)
818{
819 struct hci_dev *hdev = data;
820
821 hci_dev_lock(hdev);
822 *val = hdev->le_conn_min_interval;
823 hci_dev_unlock(hdev);
824
825 return 0;
826}
827
828DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
829 conn_min_interval_set, "%llu\n");
830
831static int conn_max_interval_set(void *data, u64 val)
832{
833 struct hci_dev *hdev = data;
834
835 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
836 return -EINVAL;
837
838 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700839 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700840 hci_dev_unlock(hdev);
841
842 return 0;
843}
844
845static int conn_max_interval_get(void *data, u64 *val)
846{
847 struct hci_dev *hdev = data;
848
849 hci_dev_lock(hdev);
850 *val = hdev->le_conn_max_interval;
851 hci_dev_unlock(hdev);
852
853 return 0;
854}
855
856DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
857 conn_max_interval_set, "%llu\n");
858
Marcel Holtmann816a93d2014-06-30 12:34:37 +0200859static int conn_latency_set(void *data, u64 val)
860{
861 struct hci_dev *hdev = data;
862
863 if (val > 0x01f3)
864 return -EINVAL;
865
866 hci_dev_lock(hdev);
867 hdev->le_conn_latency = val;
868 hci_dev_unlock(hdev);
869
870 return 0;
871}
872
873static int conn_latency_get(void *data, u64 *val)
874{
875 struct hci_dev *hdev = data;
876
877 hci_dev_lock(hdev);
878 *val = hdev->le_conn_latency;
879 hci_dev_unlock(hdev);
880
881 return 0;
882}
883
884DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
885 conn_latency_set, "%llu\n");
886
Marcel Holtmannf1649572014-06-30 12:34:38 +0200887static int supervision_timeout_set(void *data, u64 val)
888{
889 struct hci_dev *hdev = data;
890
891 if (val < 0x000a || val > 0x0c80)
892 return -EINVAL;
893
894 hci_dev_lock(hdev);
895 hdev->le_supv_timeout = val;
896 hci_dev_unlock(hdev);
897
898 return 0;
899}
900
901static int supervision_timeout_get(void *data, u64 *val)
902{
903 struct hci_dev *hdev = data;
904
905 hci_dev_lock(hdev);
906 *val = hdev->le_supv_timeout;
907 hci_dev_unlock(hdev);
908
909 return 0;
910}
911
912DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
913 supervision_timeout_set, "%llu\n");
914
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800915static int adv_channel_map_set(void *data, u64 val)
916{
917 struct hci_dev *hdev = data;
918
919 if (val < 0x01 || val > 0x07)
920 return -EINVAL;
921
922 hci_dev_lock(hdev);
923 hdev->le_adv_channel_map = val;
924 hci_dev_unlock(hdev);
925
926 return 0;
927}
928
929static int adv_channel_map_get(void *data, u64 *val)
930{
931 struct hci_dev *hdev = data;
932
933 hci_dev_lock(hdev);
934 *val = hdev->le_adv_channel_map;
935 hci_dev_unlock(hdev);
936
937 return 0;
938}
939
940DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
941 adv_channel_map_set, "%llu\n");
942
Georg Lukas729a1052014-07-26 13:59:58 +0200943static int adv_min_interval_set(void *data, u64 val)
Jukka Rissanen89863102013-12-11 17:05:38 +0200944{
Georg Lukas729a1052014-07-26 13:59:58 +0200945 struct hci_dev *hdev = data;
Jukka Rissanen89863102013-12-11 17:05:38 +0200946
Georg Lukas729a1052014-07-26 13:59:58 +0200947 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
Jukka Rissanen89863102013-12-11 17:05:38 +0200948 return -EINVAL;
949
Andre Guedes7d474e02014-02-26 20:21:54 -0300950 hci_dev_lock(hdev);
Georg Lukas729a1052014-07-26 13:59:58 +0200951 hdev->le_adv_min_interval = val;
Andre Guedes7d474e02014-02-26 20:21:54 -0300952 hci_dev_unlock(hdev);
953
954 return 0;
955}
956
Georg Lukas729a1052014-07-26 13:59:58 +0200957static int adv_min_interval_get(void *data, u64 *val)
Andre Guedes7d474e02014-02-26 20:21:54 -0300958{
Georg Lukas729a1052014-07-26 13:59:58 +0200959 struct hci_dev *hdev = data;
960
961 hci_dev_lock(hdev);
962 *val = hdev->le_adv_min_interval;
963 hci_dev_unlock(hdev);
964
965 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -0300966}
967
Georg Lukas729a1052014-07-26 13:59:58 +0200968DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
969 adv_min_interval_set, "%llu\n");
970
971static int adv_max_interval_set(void *data, u64 val)
Andre Guedes7d474e02014-02-26 20:21:54 -0300972{
Georg Lukas729a1052014-07-26 13:59:58 +0200973 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -0300974
Georg Lukas729a1052014-07-26 13:59:58 +0200975 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
Andre Guedes7d474e02014-02-26 20:21:54 -0300976 return -EINVAL;
977
Georg Lukas729a1052014-07-26 13:59:58 +0200978 hci_dev_lock(hdev);
979 hdev->le_adv_max_interval = val;
980 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300981
Georg Lukas729a1052014-07-26 13:59:58 +0200982 return 0;
983}
Andre Guedes7d474e02014-02-26 20:21:54 -0300984
Georg Lukas729a1052014-07-26 13:59:58 +0200985static int adv_max_interval_get(void *data, u64 *val)
986{
987 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -0300988
Georg Lukas729a1052014-07-26 13:59:58 +0200989 hci_dev_lock(hdev);
990 *val = hdev->le_adv_max_interval;
991 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300992
Georg Lukas729a1052014-07-26 13:59:58 +0200993 return 0;
994}
Andre Guedes7d474e02014-02-26 20:21:54 -0300995
/* Expose the LE advertising maximum interval as a read/write debugfs
 * attribute in decimal format.
 */
DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");
Andre Guedes7d474e02014-02-26 20:21:54 -0300998
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200999static int device_list_show(struct seq_file *f, void *ptr)
Andre Guedes7d474e02014-02-26 20:21:54 -03001000{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001001 struct hci_dev *hdev = f->private;
Andre Guedes7d474e02014-02-26 20:21:54 -03001002 struct hci_conn_params *p;
Marcel Holtmann40f49382014-11-02 21:46:52 +01001003 struct bdaddr_list *b;
Andre Guedes7d474e02014-02-26 20:21:54 -03001004
Andre Guedes7d474e02014-02-26 20:21:54 -03001005 hci_dev_lock(hdev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001006 list_for_each_entry(b, &hdev->whitelist, list)
1007 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Andre Guedes7d474e02014-02-26 20:21:54 -03001008 list_for_each_entry(p, &hdev->le_conn_params, list) {
Marcel Holtmann40f49382014-11-02 21:46:52 +01001009 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
Andre Guedes7d474e02014-02-26 20:21:54 -03001010 p->auto_connect);
Andre Guedes7d474e02014-02-26 20:21:54 -03001011 }
Andre Guedes7d474e02014-02-26 20:21:54 -03001012 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001013
Andre Guedes7d474e02014-02-26 20:21:54 -03001014 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -03001015}
1016
/* Open handler wiring device_list_show into the single-record
 * seq_file interface; i_private carries the hci_dev pointer.
 */
static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}
1021
/* File operations for the read-only "device_list" debugfs entry. */
static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1028
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029/* ---- HCI requests ---- */
1030
Johan Hedberg42c6b122013-03-05 20:37:49 +02001031static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001033 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034
1035 if (hdev->req_status == HCI_REQ_PEND) {
1036 hdev->req_result = result;
1037 hdev->req_status = HCI_REQ_DONE;
1038 wake_up_interruptible(&hdev->req_wait_q);
1039 }
1040}
1041
1042static void hci_req_cancel(struct hci_dev *hdev, int err)
1043{
1044 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1045
1046 if (hdev->req_status == HCI_REQ_PEND) {
1047 hdev->req_result = err;
1048 hdev->req_status = HCI_REQ_CANCELED;
1049 wake_up_interruptible(&hdev->req_wait_q);
1050 }
1051}
1052
Fengguang Wu77a63e02013-04-20 16:24:31 +03001053static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1054 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +03001055{
1056 struct hci_ev_cmd_complete *ev;
1057 struct hci_event_hdr *hdr;
1058 struct sk_buff *skb;
1059
1060 hci_dev_lock(hdev);
1061
1062 skb = hdev->recv_evt;
1063 hdev->recv_evt = NULL;
1064
1065 hci_dev_unlock(hdev);
1066
1067 if (!skb)
1068 return ERR_PTR(-ENODATA);
1069
1070 if (skb->len < sizeof(*hdr)) {
1071 BT_ERR("Too short HCI event");
1072 goto failed;
1073 }
1074
1075 hdr = (void *) skb->data;
1076 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1077
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001078 if (event) {
1079 if (hdr->evt != event)
1080 goto failed;
1081 return skb;
1082 }
1083
Johan Hedberg75e84b72013-04-02 13:35:04 +03001084 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1085 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1086 goto failed;
1087 }
1088
1089 if (skb->len < sizeof(*ev)) {
1090 BT_ERR("Too short cmd_complete event");
1091 goto failed;
1092 }
1093
1094 ev = (void *) skb->data;
1095 skb_pull(skb, sizeof(*ev));
1096
1097 if (opcode == __le16_to_cpu(ev->opcode))
1098 return skb;
1099
1100 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1101 __le16_to_cpu(ev->opcode));
1102
1103failed:
1104 kfree_skb(skb);
1105 return ERR_PTR(-ENODATA);
1106}
1107
/* Send a single HCI command and sleep until the expected completion
 * arrives or @timeout (in jiffies) expires.
 *
 * @event selects which event completes the command: 0 means the usual
 * Command Complete, otherwise that specific event code. Must be called
 * from process context. Returns the matching event skb (caller owns
 * it) or an ERR_PTR: -EINTR on signal, -ETIMEDOUT on expiry, or the
 * translated controller status.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue *before* running the request
	 * so the completion wake-up cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	/* Translate how the request ended into an errno */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1164
/* Convenience wrapper around __hci_cmd_sync_ev that waits for the
 * standard Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1171
/* Execute request and wait for completion.
 *
 * @func builds the request (it may add zero or more commands); the
 * caller must hold the request lock. Sleeps in TASK_INTERRUPTIBLE
 * until hci_req_sync_complete fires or @timeout jiffies elapse.
 * Returns 0 on success, -EINTR on signal, -ETIMEDOUT on expiry, or
 * the translated controller status.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller populate the request */
	func(&req, opt);

	/* Queue ourselves before running the request so the completion
	 * wake-up cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	/* Translate how the request ended into an errno */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1238
Johan Hedberg01178cd2013-03-05 20:37:41 +02001239static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001240 void (*req)(struct hci_request *req,
1241 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001242 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243{
1244 int ret;
1245
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001246 if (!test_bit(HCI_UP, &hdev->flags))
1247 return -ENETDOWN;
1248
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 /* Serialize all requests */
1250 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001251 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 hci_req_unlock(hdev);
1253
1254 return ret;
1255}
1256
Johan Hedberg42c6b122013-03-05 20:37:49 +02001257static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001259 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
1261 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001262 set_bit(HCI_RESET, &req->hdev->flags);
1263 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264}
1265
Johan Hedberg42c6b122013-03-05 20:37:49 +02001266static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001268 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001269
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001271 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001273 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001274 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001275
1276 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001277 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278}
1279
Johan Hedberg42c6b122013-03-05 20:37:49 +02001280static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001281{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001282 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001283
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001284 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001285 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001286
Marcel Holtmannf6996cf2013-10-07 02:31:39 -07001287 /* Read Local Supported Commands */
1288 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1289
1290 /* Read Local Supported Features */
1291 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1292
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001293 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001294 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +03001295
1296 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001297 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001298
Marcel Holtmannf38ba942013-10-07 03:55:53 -07001299 /* Read Flow Control Mode */
1300 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1301
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001302 /* Read Location Data */
1303 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001304}
1305
Johan Hedberg42c6b122013-03-05 20:37:49 +02001306static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001307{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001308 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001309
1310 BT_DBG("%s %ld", hdev->name, opt);
1311
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001312 /* Reset */
1313 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001314 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001315
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001316 switch (hdev->dev_type) {
1317 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001318 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001319 break;
1320
1321 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001322 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001323 break;
1324
1325 default:
1326 BT_ERR("Unknown device type %d", hdev->dev_type);
1327 break;
1328 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001329}
1330
/* Queue the BR/EDR specific setup commands: buffer/identity queries,
 * event filter reset and connection accept timeout. Command order is
 * deliberate; do not reorder.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms slots) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1372
/* Queue the LE specific setup commands: capability queries and white
 * list initialization. LE-only controllers get the LE_ENABLED flag set
 * directly since they cannot toggle it via Write LE Host Supported.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1396
1397static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1398{
1399 if (lmp_ext_inq_capable(hdev))
1400 return 0x02;
1401
1402 if (lmp_inq_rssi_capable(hdev))
1403 return 0x01;
1404
1405 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1406 hdev->lmp_subver == 0x0757)
1407 return 0x01;
1408
1409 if (hdev->manufacturer == 15) {
1410 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1411 return 0x01;
1412 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1413 return 0x01;
1414 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1415 return 0x01;
1416 }
1417
1418 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1419 hdev->lmp_subver == 0x1805)
1420 return 0x01;
1421
1422 return 0x00;
1423}
1424
Johan Hedberg42c6b122013-03-05 20:37:49 +02001425static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001426{
1427 u8 mode;
1428
Johan Hedberg42c6b122013-03-05 20:37:49 +02001429 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001430
Johan Hedberg42c6b122013-03-05 20:37:49 +02001431 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001432}
1433
/* Build and queue the Set Event Mask command, enabling only the events
 * the controller's feature set can actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1510
/* Second init stage: transport-specific setup plus SSP/EIR, inquiry
 * mode, extended features and link security configuration. Order is
 * significant; do not reorder the commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: wipe any stale EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1572
Johan Hedberg42c6b122013-03-05 20:37:49 +02001573static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001574{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001575 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001576 struct hci_cp_write_def_link_policy cp;
1577 u16 link_policy = 0;
1578
1579 if (lmp_rswitch_capable(hdev))
1580 link_policy |= HCI_LP_RSWITCH;
1581 if (lmp_hold_capable(hdev))
1582 link_policy |= HCI_LP_HOLD;
1583 if (lmp_sniff_capable(hdev))
1584 link_policy |= HCI_LP_SNIFF;
1585 if (lmp_park_capable(hdev))
1586 link_policy |= HCI_LP_PARK;
1587
1588 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001589 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001590}
1591
/* Queue a Write LE Host Supported command reflecting the current
 * HCI_LE_ENABLED flag, but only when that would actually change the
 * host's LE support state.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command if it would change the current state */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1612
/* Build and queue the Set Event Mask Page 2 command, enabling CSB and
 * authenticated-payload-timeout events when the controller supports
 * the corresponding features.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1644
/* Third init stage: event masks, stored link key cleanup, link policy,
 * LE event configuration and extended feature page reads.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		/* Default LE meta events (connection, advertising report,
		 * connection update, read remote features)
		 */
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1715
/* Fourth-stage HCI init: optional features gated on the supported-commands
 * bitmask and LMP feature bits read during the earlier init stages.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1743
/* Run the staged controller initialization (hci_init1..4_req) and, during
 * the initial HCI_SETUP phase only, populate the per-device debugfs tree.
 *
 * Returns 0 on success or a negative errno from any failed init stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to all BR/EDR capable controllers */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff-mode tuning entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE entries, plus SMP registration for LE capable controllers */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}
1882
/* Minimal init sequence for unconfigured controllers: optional reset,
 * then read the local version and (only when the driver can change it
 * via set_bdaddr) the public Bluetooth device address.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1900
1901static int __hci_unconf_init(struct hci_dev *hdev)
1902{
1903 int err;
1904
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001905 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1906 return 0;
1907
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001908 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1909 if (err < 0)
1910 return err;
1911
1912 return 0;
1913}
1914
Johan Hedberg42c6b122013-03-05 20:37:49 +02001915static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916{
1917 __u8 scan = opt;
1918
Johan Hedberg42c6b122013-03-05 20:37:49 +02001919 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920
1921 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001922 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923}
1924
Johan Hedberg42c6b122013-03-05 20:37:49 +02001925static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926{
1927 __u8 auth = opt;
1928
Johan Hedberg42c6b122013-03-05 20:37:49 +02001929 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
1931 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001932 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933}
1934
Johan Hedberg42c6b122013-03-05 20:37:49 +02001935static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936{
1937 __u8 encrypt = opt;
1938
Johan Hedberg42c6b122013-03-05 20:37:49 +02001939 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001941 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001942 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943}
1944
Johan Hedberg42c6b122013-03-05 20:37:49 +02001945static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001946{
1947 __le16 policy = cpu_to_le16(opt);
1948
Johan Hedberg42c6b122013-03-05 20:37:49 +02001949 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001950
1951 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001952 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001953}
1954
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001955/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 * Device is held on return. */
1957struct hci_dev *hci_dev_get(int index)
1958{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001959 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
1961 BT_DBG("%d", index);
1962
1963 if (index < 0)
1964 return NULL;
1965
1966 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001967 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 if (d->id == index) {
1969 hdev = hci_dev_hold(d);
1970 break;
1971 }
1972 }
1973 read_unlock(&hci_dev_list_lock);
1974 return hdev;
1975}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
1977/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001978
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001979bool hci_discovery_active(struct hci_dev *hdev)
1980{
1981 struct discovery_state *discov = &hdev->discovery;
1982
Andre Guedes6fbe1952012-02-03 17:47:58 -03001983 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001984 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001985 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001986 return true;
1987
Andre Guedes6fbe1952012-02-03 17:47:58 -03001988 default:
1989 return false;
1990 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001991}
1992
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events to userspace where required.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op transitions generate no events */
	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* Only report "discovery stopped" if discovery actually
		 * ran; STARTING -> STOPPED means it never got going.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
2022
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002023void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024{
Johan Hedberg30883512012-01-04 14:16:21 +02002025 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002026 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
Johan Hedberg561aafb2012-01-04 13:31:59 +02002028 list_for_each_entry_safe(p, n, &cache->all, all) {
2029 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002030 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002032
2033 INIT_LIST_HEAD(&cache->unknown);
2034 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035}
2036
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002037struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2038 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039{
Johan Hedberg30883512012-01-04 14:16:21 +02002040 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 struct inquiry_entry *e;
2042
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002043 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
Johan Hedberg561aafb2012-01-04 13:31:59 +02002045 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002047 return e;
2048 }
2049
2050 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051}
2052
Johan Hedberg561aafb2012-01-04 13:31:59 +02002053struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002054 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002055{
Johan Hedberg30883512012-01-04 14:16:21 +02002056 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002057 struct inquiry_entry *e;
2058
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002059 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002060
2061 list_for_each_entry(e, &cache->unknown, list) {
2062 if (!bacmp(&e->data.bdaddr, bdaddr))
2063 return e;
2064 }
2065
2066 return NULL;
2067}
2068
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002069struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002070 bdaddr_t *bdaddr,
2071 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002072{
2073 struct discovery_state *cache = &hdev->discovery;
2074 struct inquiry_entry *e;
2075
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002076 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002077
2078 list_for_each_entry(e, &cache->resolve, list) {
2079 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2080 return e;
2081 if (!bacmp(&e->data.bdaddr, bdaddr))
2082 return e;
2083 }
2084
2085 return NULL;
2086}
2087
/* Re-insert @ie into the resolve list so the list stays ordered for name
 * resolution: entries with a pending name lookup stay at the front, the
 * rest are kept sorted by ascending |RSSI| (i.e. stronger signal first,
 * assuming the usual negative dBm values).
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last entry that should still precede @ie */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
2106
/* Add or refresh the inquiry cache entry for the device described by
 * @data. @name_known indicates whether the remote name is already known.
 *
 * Returns MGMT_DEV_FOUND_* flags telling the caller how userspace should
 * treat the result (e.g. whether name confirmation is required).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* Keep the resolve list ordered if the RSSI changed while
		 * a name lookup is still needed.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Allocation failed; ask userspace to confirm the name */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry off the unknown list once its name is known */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2168
2169static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2170{
Johan Hedberg30883512012-01-04 14:16:21 +02002171 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 struct inquiry_info *info = (struct inquiry_info *) buf;
2173 struct inquiry_entry *e;
2174 int copied = 0;
2175
Johan Hedberg561aafb2012-01-04 13:31:59 +02002176 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002178
2179 if (copied >= num)
2180 break;
2181
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 bacpy(&info->bdaddr, &data->bdaddr);
2183 info->pscan_rep_mode = data->pscan_rep_mode;
2184 info->pscan_period_mode = data->pscan_period_mode;
2185 info->pscan_mode = data->pscan_mode;
2186 memcpy(info->dev_class, data->dev_class, 3);
2187 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002188
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002190 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 }
2192
2193 BT_DBG("cache %p, copied %d", cache, copied);
2194 return copied;
2195}
2196
Johan Hedberg42c6b122013-03-05 20:37:49 +02002197static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198{
2199 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002200 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 struct hci_cp_inquiry cp;
2202
2203 BT_DBG("%s", hdev->name);
2204
2205 if (test_bit(HCI_INQUIRY, &hdev->flags))
2206 return;
2207
2208 /* Start Inquiry */
2209 memcpy(&cp.lap, &ir->lap, 3);
2210 cp.length = ir->length;
2211 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002212 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213}
2214
2215int hci_inquiry(void __user *arg)
2216{
2217 __u8 __user *ptr = arg;
2218 struct hci_inquiry_req ir;
2219 struct hci_dev *hdev;
2220 int err = 0, do_inquiry = 0, max_rsp;
2221 long timeo;
2222 __u8 *buf;
2223
2224 if (copy_from_user(&ir, ptr, sizeof(ir)))
2225 return -EFAULT;
2226
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002227 hdev = hci_dev_get(ir.dev_id);
2228 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 return -ENODEV;
2230
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 err = -EBUSY;
2233 goto done;
2234 }
2235
Marcel Holtmann4a964402014-07-02 19:10:33 +02002236 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002237 err = -EOPNOTSUPP;
2238 goto done;
2239 }
2240
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002241 if (hdev->dev_type != HCI_BREDR) {
2242 err = -EOPNOTSUPP;
2243 goto done;
2244 }
2245
Johan Hedberg56f87902013-10-02 13:43:13 +03002246 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2247 err = -EOPNOTSUPP;
2248 goto done;
2249 }
2250
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002251 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002252 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002253 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002254 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 do_inquiry = 1;
2256 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002257 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
Marcel Holtmann04837f62006-07-03 10:02:33 +02002259 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002260
2261 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002262 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2263 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002264 if (err < 0)
2265 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002266
2267 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2268 * cleared). If it is interrupted by a signal, return -EINTR.
2269 */
NeilBrown74316202014-07-07 15:16:04 +10002270 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002271 TASK_INTERRUPTIBLE))
2272 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002273 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002275 /* for unlimited number of responses we will use buffer with
2276 * 255 entries
2277 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2279
2280 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2281 * copy it to the user space.
2282 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002283 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002284 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 err = -ENOMEM;
2286 goto done;
2287 }
2288
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002289 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002291 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292
2293 BT_DBG("num_rsp %d", ir.num_rsp);
2294
2295 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2296 ptr += sizeof(ir);
2297 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002298 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002300 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 err = -EFAULT;
2302
2303 kfree(buf);
2304
2305done:
2306 hci_dev_put(hdev);
2307 return err;
2308}
2309
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002310static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 int ret = 0;
2313
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 BT_DBG("%s %p", hdev->name, hdev);
2315
2316 hci_req_lock(hdev);
2317
Johan Hovold94324962012-03-15 14:48:41 +01002318 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2319 ret = -ENODEV;
2320 goto done;
2321 }
2322
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002323 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2324 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002325 /* Check for rfkill but allow the HCI setup stage to
2326 * proceed (which in itself doesn't cause any RF activity).
2327 */
2328 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2329 ret = -ERFKILL;
2330 goto done;
2331 }
2332
2333 /* Check for valid public address or a configured static
2334 * random adddress, but let the HCI setup proceed to
2335 * be able to determine if there is a public address
2336 * or not.
2337 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002338 * In case of user channel usage, it is not important
2339 * if a public address or static random address is
2340 * available.
2341 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002342 * This check is only valid for BR/EDR controllers
2343 * since AMP controllers do not have an address.
2344 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002345 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2346 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002347 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2348 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2349 ret = -EADDRNOTAVAIL;
2350 goto done;
2351 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002352 }
2353
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 if (test_bit(HCI_UP, &hdev->flags)) {
2355 ret = -EALREADY;
2356 goto done;
2357 }
2358
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 if (hdev->open(hdev)) {
2360 ret = -EIO;
2361 goto done;
2362 }
2363
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002364 atomic_set(&hdev->cmd_cnt, 1);
2365 set_bit(HCI_INIT, &hdev->flags);
2366
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002367 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2368 if (hdev->setup)
2369 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002370
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002371 /* The transport driver can set these quirks before
2372 * creating the HCI device or in its setup callback.
2373 *
2374 * In case any of them is set, the controller has to
2375 * start up as unconfigured.
2376 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002377 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2378 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002379 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002380
2381 /* For an unconfigured controller it is required to
2382 * read at least the version information provided by
2383 * the Read Local Version Information command.
2384 *
2385 * If the set_bdaddr driver callback is provided, then
2386 * also the original Bluetooth public device address
2387 * will be read using the Read BD Address command.
2388 */
2389 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2390 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002391 }
2392
Marcel Holtmann9713c172014-07-06 12:11:15 +02002393 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2394 /* If public address change is configured, ensure that
2395 * the address gets programmed. If the driver does not
2396 * support changing the public address, fail the power
2397 * on procedure.
2398 */
2399 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2400 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002401 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2402 else
2403 ret = -EADDRNOTAVAIL;
2404 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002405
2406 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002407 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002408 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002409 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 }
2411
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002412 clear_bit(HCI_INIT, &hdev->flags);
2413
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 if (!ret) {
2415 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002416 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 set_bit(HCI_UP, &hdev->flags);
2418 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002419 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002420 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002421 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002422 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002423 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002424 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002425 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002426 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002427 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002428 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002430 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002431 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002432 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
2434 skb_queue_purge(&hdev->cmd_q);
2435 skb_queue_purge(&hdev->rx_q);
2436
2437 if (hdev->flush)
2438 hdev->flush(hdev);
2439
2440 if (hdev->sent_cmd) {
2441 kfree_skb(hdev->sent_cmd);
2442 hdev->sent_cmd = NULL;
2443 }
2444
2445 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002446 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 }
2448
2449done:
2450 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 return ret;
2452}
2453
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002454/* ---- HCI ioctl helpers ---- */
2455
2456int hci_dev_open(__u16 dev)
2457{
2458 struct hci_dev *hdev;
2459 int err;
2460
2461 hdev = hci_dev_get(dev);
2462 if (!hdev)
2463 return -ENODEV;
2464
Marcel Holtmann4a964402014-07-02 19:10:33 +02002465 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002466 * up as user channel. Trying to bring them up as normal devices
2467 * will result into a failure. Only user channel operation is
2468 * possible.
2469 *
2470 * When this function is called for a user channel, the flag
2471 * HCI_USER_CHANNEL will be set first before attempting to
2472 * open the device.
2473 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002474 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002475 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2476 err = -EOPNOTSUPP;
2477 goto done;
2478 }
2479
Johan Hedberge1d08f42013-10-01 22:44:50 +03002480 /* We need to ensure that no other power on/off work is pending
2481 * before proceeding to call hci_dev_do_open. This is
2482 * particularly important if the setup procedure has not yet
2483 * completed.
2484 */
2485 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2486 cancel_delayed_work(&hdev->power_off);
2487
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002488 /* After this call it is guaranteed that the setup procedure
2489 * has finished. This means that error conditions like RFKILL
2490 * or no valid public or static random address apply.
2491 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002492 flush_workqueue(hdev->req_workqueue);
2493
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002494 /* For controllers not using the management interface and that
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002495 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002496 * so that pairing works for them. Once the management interface
2497 * is in use this bit will be cleared again and userspace has
2498 * to explicitly enable it.
2499 */
2500 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2501 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002502 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002503
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002504 err = hci_dev_do_open(hdev);
2505
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002506done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002507 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002508 return err;
2509}
2510
Johan Hedbergd7347f32014-07-04 12:37:23 +03002511/* This function requires the caller holds hdev->lock */
2512static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2513{
2514 struct hci_conn_params *p;
2515
Johan Hedbergf161dd42014-08-15 21:06:54 +03002516 list_for_each_entry(p, &hdev->le_conn_params, list) {
2517 if (p->conn) {
2518 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002519 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002520 p->conn = NULL;
2521 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002522 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002523 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002524
2525 BT_DBG("All LE pending actions cleared");
2526}
2527
/* Shut down an HCI device: stop all pending work, flush queues and
 * connection state, optionally send HCI Reset, and call the driver's
 * close callback. Always returns 0. The device reference taken while
 * it was up is dropped at the end.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* If the device was already down, only the command timer needs
	 * to be stopped before returning.
	 */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and clear both the normal
	 * and limited discoverable flags.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* Send HCI Reset on close only when the quirk asks for it and the
	 * controller is neither auto-off-managed nor unconfigured.
	 */
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags, keeping only HCI_RAW across the down transition */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Only notify mgmt about the power-off when this was not an
	 * automatic shutdown, and only for BR/EDR controllers.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2635
2636int hci_dev_close(__u16 dev)
2637{
2638 struct hci_dev *hdev;
2639 int err;
2640
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002641 hdev = hci_dev_get(dev);
2642 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002644
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002645 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2646 err = -EBUSY;
2647 goto done;
2648 }
2649
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002650 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2651 cancel_delayed_work(&hdev->power_off);
2652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002654
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002655done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 hci_dev_put(hdev);
2657 return err;
2658}
2659
/* ioctl handler for HCIDEVRESET: flush all pending traffic and state of
 * a running device and issue an HCI Reset.
 *
 * Returns 0 on success, -ENODEV for an unknown index, -ENETDOWN if the
 * device is not up, -EBUSY when owned by a user channel, -EOPNOTSUPP
 * for unconfigured controllers, or the result of the reset request.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the command credit and zero all flow-control counters
	 * before the controller is reset.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2713
2714int hci_dev_reset_stat(__u16 dev)
2715{
2716 struct hci_dev *hdev;
2717 int ret = 0;
2718
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002719 hdev = hci_dev_get(dev);
2720 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 return -ENODEV;
2722
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002723 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2724 ret = -EBUSY;
2725 goto done;
2726 }
2727
Marcel Holtmann4a964402014-07-02 19:10:33 +02002728 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002729 ret = -EOPNOTSUPP;
2730 goto done;
2731 }
2732
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2734
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002735done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 return ret;
2738}
2739
Johan Hedberg123abc02014-07-10 12:09:07 +03002740static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2741{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002742 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002743
2744 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2745
2746 if ((scan & SCAN_PAGE))
2747 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2748 &hdev->dev_flags);
2749 else
2750 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2751 &hdev->dev_flags);
2752
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002753 if ((scan & SCAN_INQUIRY)) {
2754 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2755 &hdev->dev_flags);
2756 } else {
2757 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2758 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2759 &hdev->dev_flags);
2760 }
2761
Johan Hedberg123abc02014-07-10 12:09:07 +03002762 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2763 return;
2764
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002765 if (conn_changed || discov_changed) {
2766 /* In case this was disabled through mgmt */
2767 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2768
2769 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2770 mgmt_update_adv_data(hdev);
2771
Johan Hedberg123abc02014-07-10 12:09:07 +03002772 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002773 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002774}
2775
/* Dispatcher for the HCISET* device-configuration ioctls.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETSCAN, HCISETPTYPE, ...)
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Returns 0 on success or a negative errno. Only BR/EDR controllers
 * that are neither user-channel owned nor unconfigured are accepted.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured here. */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: high half is the MTU,
		 * low half the packet count.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU, for SCO. */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2877
/* ioctl handler for HCIGETDEVLIST: copy up to dev_num (id, flags) pairs
 * for the registered controllers back to userspace.
 *
 * @arg: userspace struct hci_dev_list_req; dev_num is read from its
 *       first 16-bit field and bounds the reply.
 *
 * Returns 0 on success, -EFAULT on copy errors, -EINVAL for a zero or
 * oversized request, -ENOMEM when the reply buffer allocation fails.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays within two pages. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of entries actually filled. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2927
2928int hci_get_dev_info(void __user *arg)
2929{
2930 struct hci_dev *hdev;
2931 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002932 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 int err = 0;
2934
2935 if (copy_from_user(&di, arg, sizeof(di)))
2936 return -EFAULT;
2937
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002938 hdev = hci_dev_get(di.dev_id);
2939 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 return -ENODEV;
2941
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002942 /* When the auto-off is configured it means the transport
2943 * is running, but in that case still indicate that the
2944 * device is actually down.
2945 */
2946 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2947 flags = hdev->flags & ~BIT(HCI_UP);
2948 else
2949 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002950
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 strcpy(di.name, hdev->name);
2952 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002953 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002954 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002956 if (lmp_bredr_capable(hdev)) {
2957 di.acl_mtu = hdev->acl_mtu;
2958 di.acl_pkts = hdev->acl_pkts;
2959 di.sco_mtu = hdev->sco_mtu;
2960 di.sco_pkts = hdev->sco_pkts;
2961 } else {
2962 di.acl_mtu = hdev->le_mtu;
2963 di.acl_pkts = hdev->le_pkts;
2964 di.sco_mtu = 0;
2965 di.sco_pkts = 0;
2966 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 di.link_policy = hdev->link_policy;
2968 di.link_mode = hdev->link_mode;
2969
2970 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2971 memcpy(&di.features, &hdev->features, sizeof(di.features));
2972
2973 if (copy_to_user(arg, &di, sizeof(di)))
2974 err = -EFAULT;
2975
2976 hci_dev_put(hdev);
2977
2978 return err;
2979}
2980
2981/* ---- Interface to HCI drivers ---- */
2982
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002983static int hci_rfkill_set_block(void *data, bool blocked)
2984{
2985 struct hci_dev *hdev = data;
2986
2987 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2988
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002989 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2990 return -EBUSY;
2991
Johan Hedberg5e130362013-09-13 08:58:17 +03002992 if (blocked) {
2993 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002994 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2995 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002996 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002997 } else {
2998 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002999 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003000
3001 return 0;
3002}
3003
/* rfkill integration: only the block/unblock callback is needed. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3007
/* Deferred power-on work: open the device and complete the
 * SETUP/CONFIG state transitions, notifying mgmt as appropriate.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-power-off after the timeout unless userspace
		 * takes over in the meantime.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3066
/* Deferred power-off work: simply close the device. Queued either by
 * the auto-off timeout or by mgmt.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
3076
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003077static void hci_discov_off(struct work_struct *work)
3078{
3079 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003080
3081 hdev = container_of(work, struct hci_dev, discov_off.work);
3082
3083 BT_DBG("%s", hdev->name);
3084
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003085 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003086}
3087
Johan Hedberg35f74982014-02-18 17:14:32 +02003088void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003089{
Johan Hedberg48210022013-01-27 00:31:28 +02003090 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003091
Johan Hedberg48210022013-01-27 00:31:28 +02003092 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3093 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003094 kfree(uuid);
3095 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003096}
3097
Johan Hedberg35f74982014-02-18 17:14:32 +02003098void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003099{
Johan Hedberg0378b592014-11-19 15:22:22 +02003100 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003101
Johan Hedberg0378b592014-11-19 15:22:22 +02003102 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3103 list_del_rcu(&key->list);
3104 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003105 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003106}
3107
/* Remove all stored SMP Long Term Keys from hdev.
 *
 * Same RCU pattern as hci_link_keys_clear(): list_del_rcu() plus
 * kfree_rcu() keeps concurrent readers safe and makes the non-_safe
 * iteration valid, since the element is not freed until a grace
 * period elapses.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3117
/* Remove all stored Identity Resolving Keys from hdev.
 *
 * Uses the RCU deletion pattern (list_del_rcu() + kfree_rcu()) so
 * lookups such as hci_find_irk_by_rpa(), which only take the RCU read
 * lock, stay safe while keys are being discarded.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3127
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003128struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3129{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003130 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003131
Johan Hedberg0378b592014-11-19 15:22:22 +02003132 rcu_read_lock();
3133 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3134 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3135 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003136 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02003137 }
3138 }
3139 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003140
3141 return NULL;
3142}
3143
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303144static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003145 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003146{
3147 /* Legacy key */
3148 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303149 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003150
3151 /* Debug keys are insecure so don't store them persistently */
3152 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303153 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003154
3155 /* Changed combination key and there's no previous one */
3156 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303157 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003158
3159 /* Security mode 3 case */
3160 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303161 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003162
3163 /* Neither local nor remote side had no-bonding as requirement */
3164 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303165 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003166
3167 /* Local side had dedicated bonding as requirement */
3168 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303169 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003170
3171 /* Remote side had dedicated bonding as requirement */
3172 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303173 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003174
3175 /* If none of the above criteria match, then don't store the key
3176 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303177 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003178}
3179
Johan Hedberge804d252014-07-16 11:42:28 +03003180static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003181{
Johan Hedberge804d252014-07-16 11:42:28 +03003182 if (type == SMP_LTK)
3183 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003184
Johan Hedberge804d252014-07-16 11:42:28 +03003185 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003186}
3187
/* Look up a Long Term Key by its EDIV/Rand pair and the role it is
 * needed for.
 *
 * LE Secure Connections keys (smp_ltk_is_sc()) are valid for both
 * roles, so they match as soon as the ediv/rand comparison passes;
 * P-256 debug keys are skipped unless HCI_KEEP_DEBUG_KEYS is set.
 * Other (legacy) keys must additionally match the requested role via
 * ltk_role().
 *
 * Runs under rcu_read_lock(); the returned pointer is only valid as
 * long as the caller prevents deletion (presumably via hdev->lock --
 * TODO confirm).
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (smp_ltk_is_sc(k)) {
			/* Debug keys are only usable when explicitly allowed */
			if (k->type == SMP_LTK_P256_DEBUG &&
			    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
				continue;
			rcu_read_unlock();
			return k;
		}

		if (ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003215
/* Look up a Long Term Key by remote address/address type and role.
 *
 * Mirrors hci_find_ltk() but matches on the identity address instead
 * of EDIV/Rand: LE Secure Connections keys match regardless of role
 * (P-256 debug keys only when HCI_KEEP_DEBUG_KEYS is set), while
 * legacy keys must also match the requested role.
 *
 * Traversal happens under rcu_read_lock(); lifetime of the returned
 * key presumably relies on hdev->lock being held by the caller --
 * TODO confirm.
 */
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k)) {
			if (k->type == SMP_LTK_P256_DEBUG &&
			    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
				continue;
			rcu_read_unlock();
			return k;
		}

		if (ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003243
/* Resolve a Resolvable Private Address to the IRK that generated it.
 *
 * Two passes over the IRK list, both under one RCU read-side critical
 * section: the first is a cheap comparison against the RPA cached in
 * each entry, the second runs the full smp_irk_matches() crypto check
 * and, on a hit, caches the RPA in the entry so the next lookup takes
 * the fast path.
 *
 * NOTE(review): the bacpy() writes irk->rpa while holding only the
 * RCU read lock; this presumably relies on writers being serialized
 * elsewhere (hdev->lock) -- confirm.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
3267
3268struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3269 u8 addr_type)
3270{
3271 struct smp_irk *irk;
3272
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003273 /* Identity Address must be public or static random */
3274 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3275 return NULL;
3276
Johan Hedbergadae20c2014-11-13 14:37:48 +02003277 rcu_read_lock();
3278 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003279 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003280 bacmp(bdaddr, &irk->bdaddr) == 0) {
3281 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003282 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003283 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003284 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003285 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003286
3287 return NULL;
3288}
3289
/* Store (or update) the BR/EDR link key for bdaddr.
 *
 * Reuses an existing entry for the address if present, otherwise
 * allocates a fresh one and links it into hdev->link_keys with
 * list_add_rcu().  If @persistent is non-NULL it is set to whether
 * the key should survive a power cycle (see hci_persistent_key()).
 *
 * Returns the stored key, or NULL on allocation failure.
 * NOTE(review): assumes hdev->lock is held to serialize updaters --
 * confirm at call sites.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key known" for the quirk below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3336
Johan Hedbergca9142b2014-02-19 14:57:44 +02003337struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003338 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003339 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003340{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003341 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003342 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003343
Johan Hedberge804d252014-07-16 11:42:28 +03003344 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003345 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003346 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003347 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003348 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003349 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003350 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003351 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003352 }
3353
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003354 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003355 key->bdaddr_type = addr_type;
3356 memcpy(key->val, tk, sizeof(key->val));
3357 key->authenticated = authenticated;
3358 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003359 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003360 key->enc_size = enc_size;
3361 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003362
Johan Hedbergca9142b2014-02-19 14:57:44 +02003363 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003364}
3365
Johan Hedbergca9142b2014-02-19 14:57:44 +02003366struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3367 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003368{
3369 struct smp_irk *irk;
3370
3371 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3372 if (!irk) {
3373 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3374 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003375 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003376
3377 bacpy(&irk->bdaddr, bdaddr);
3378 irk->addr_type = addr_type;
3379
Johan Hedbergadae20c2014-11-13 14:37:48 +02003380 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003381 }
3382
3383 memcpy(irk->val, val, 16);
3384 bacpy(&irk->rpa, rpa);
3385
Johan Hedbergca9142b2014-02-19 14:57:44 +02003386 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003387}
3388
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003389int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3390{
3391 struct link_key *key;
3392
3393 key = hci_find_link_key(hdev, bdaddr);
3394 if (!key)
3395 return -ENOENT;
3396
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003397 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003398
Johan Hedberg0378b592014-11-19 15:22:22 +02003399 list_del_rcu(&key->list);
3400 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003401
3402 return 0;
3403}
3404
/* Delete all Long Term Keys stored for bdaddr/bdaddr_type.
 *
 * More than one entry can match the same address (e.g. different key
 * types), so the whole list is scanned and every match removed.
 * Deletion uses list_del_rcu()/kfree_rcu(), which is what makes
 * iterating without the _safe variant valid here.
 *
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
3423
/* Delete the Identity Resolving Key(s) stored for bdaddr/addr_type.
 *
 * Same RCU deletion pattern as hci_remove_ltk(): list_del_rcu() plus
 * kfree_rcu() defers the free past a grace period, keeping both this
 * traversal and concurrent RCU readers safe.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3438
Ville Tervo6bd32322011-02-16 16:32:41 +02003439/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003440static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003441{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003442 struct hci_dev *hdev = container_of(work, struct hci_dev,
3443 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003444
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003445 if (hdev->sent_cmd) {
3446 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3447 u16 opcode = __le16_to_cpu(sent->opcode);
3448
3449 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3450 } else {
3451 BT_ERR("%s command tx timeout", hdev->name);
3452 }
3453
Ville Tervo6bd32322011-02-16 16:32:41 +02003454 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003455 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003456}
3457
Szymon Janc2763eda2011-03-22 13:12:22 +01003458struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003459 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003460{
3461 struct oob_data *data;
3462
3463 list_for_each_entry(data, &hdev->remote_oob_data, list)
3464 if (bacmp(bdaddr, &data->bdaddr) == 0)
3465 return data;
3466
3467 return NULL;
3468}
3469
3470int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3471{
3472 struct oob_data *data;
3473
3474 data = hci_find_remote_oob_data(hdev, bdaddr);
3475 if (!data)
3476 return -ENOENT;
3477
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003478 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003479
3480 list_del(&data->list);
3481 kfree(data);
3482
3483 return 0;
3484}
3485
Johan Hedberg35f74982014-02-18 17:14:32 +02003486void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003487{
3488 struct oob_data *data, *n;
3489
3490 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3491 list_del(&data->list);
3492 kfree(data);
3493 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003494}
3495
Marcel Holtmann07988722014-01-10 02:07:29 -08003496int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg38da1702014-11-17 20:52:20 +02003497 u8 *hash, u8 *rand)
Szymon Janc2763eda2011-03-22 13:12:22 +01003498{
3499 struct oob_data *data;
3500
3501 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003502 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003503 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003504 if (!data)
3505 return -ENOMEM;
3506
3507 bacpy(&data->bdaddr, bdaddr);
3508 list_add(&data->list, &hdev->remote_oob_data);
3509 }
3510
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003511 memcpy(data->hash192, hash, sizeof(data->hash192));
Johan Hedberg38da1702014-11-17 20:52:20 +02003512 memcpy(data->rand192, rand, sizeof(data->rand192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003513
Marcel Holtmann07988722014-01-10 02:07:29 -08003514 memset(data->hash256, 0, sizeof(data->hash256));
Johan Hedberg38da1702014-11-17 20:52:20 +02003515 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmann07988722014-01-10 02:07:29 -08003516
3517 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3518
3519 return 0;
3520}
3521
3522int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg38da1702014-11-17 20:52:20 +02003523 u8 *hash192, u8 *rand192,
3524 u8 *hash256, u8 *rand256)
Marcel Holtmann07988722014-01-10 02:07:29 -08003525{
3526 struct oob_data *data;
3527
3528 data = hci_find_remote_oob_data(hdev, bdaddr);
3529 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003530 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003531 if (!data)
3532 return -ENOMEM;
3533
3534 bacpy(&data->bdaddr, bdaddr);
3535 list_add(&data->list, &hdev->remote_oob_data);
3536 }
3537
3538 memcpy(data->hash192, hash192, sizeof(data->hash192));
Johan Hedberg38da1702014-11-17 20:52:20 +02003539 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003540
3541 memcpy(data->hash256, hash256, sizeof(data->hash256));
Johan Hedberg38da1702014-11-17 20:52:20 +02003542 memcpy(data->rand256, rand256, sizeof(data->rand256));
Marcel Holtmann07988722014-01-10 02:07:29 -08003543
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003544 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003545
3546 return 0;
3547}
3548
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003549struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003550 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003551{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003552 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003553
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003554 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003555 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003556 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003557 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003558
3559 return NULL;
3560}
3561
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003562void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003563{
3564 struct list_head *p, *n;
3565
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003566 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003567 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003568
3569 list_del(p);
3570 kfree(b);
3571 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003572}
3573
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003574int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003575{
3576 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003577
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003578 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003579 return -EBADF;
3580
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003581 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003582 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003583
Johan Hedberg27f70f32014-07-21 10:50:06 +03003584 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003585 if (!entry)
3586 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003587
3588 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003589 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003590
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003591 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003592
3593 return 0;
3594}
3595
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003596int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003597{
3598 struct bdaddr_list *entry;
3599
Johan Hedberg35f74982014-02-18 17:14:32 +02003600 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003601 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003602 return 0;
3603 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003604
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003605 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003606 if (!entry)
3607 return -ENOENT;
3608
3609 list_del(&entry->list);
3610 kfree(entry);
3611
3612 return 0;
3613}
3614
Andre Guedes15819a72014-02-03 13:56:18 -03003615/* This function requires the caller holds hdev->lock */
3616struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3617 bdaddr_t *addr, u8 addr_type)
3618{
3619 struct hci_conn_params *params;
3620
Johan Hedberg738f6182014-07-03 19:33:51 +03003621 /* The conn params list only contains identity addresses */
3622 if (!hci_is_identity_address(addr, addr_type))
3623 return NULL;
3624
Andre Guedes15819a72014-02-03 13:56:18 -03003625 list_for_each_entry(params, &hdev->le_conn_params, list) {
3626 if (bacmp(&params->addr, addr) == 0 &&
3627 params->addr_type == addr_type) {
3628 return params;
3629 }
3630 }
3631
3632 return NULL;
3633}
3634
Andre Guedescef952c2014-02-26 20:21:49 -03003635static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3636{
3637 struct hci_conn *conn;
3638
3639 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3640 if (!conn)
3641 return false;
3642
3643 if (conn->dst_type != type)
3644 return false;
3645
3646 if (conn->state != BT_CONNECTED)
3647 return false;
3648
3649 return true;
3650}
3651
Andre Guedes15819a72014-02-03 13:56:18 -03003652/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003653struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3654 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003655{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003656 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003657
Johan Hedberg738f6182014-07-03 19:33:51 +03003658 /* The list only contains identity addresses */
3659 if (!hci_is_identity_address(addr, addr_type))
3660 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003661
Johan Hedberg501f8822014-07-04 12:37:26 +03003662 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003663 if (bacmp(&param->addr, addr) == 0 &&
3664 param->addr_type == addr_type)
3665 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003666 }
3667
3668 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003669}
3670
3671/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003672struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3673 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003674{
3675 struct hci_conn_params *params;
3676
Johan Hedbergc46245b2014-07-02 17:37:33 +03003677 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003678 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003679
Andre Guedes15819a72014-02-03 13:56:18 -03003680 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003681 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003682 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003683
3684 params = kzalloc(sizeof(*params), GFP_KERNEL);
3685 if (!params) {
3686 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003687 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003688 }
3689
3690 bacpy(&params->addr, addr);
3691 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003692
3693 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003694 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003695
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003696 params->conn_min_interval = hdev->le_conn_min_interval;
3697 params->conn_max_interval = hdev->le_conn_max_interval;
3698 params->conn_latency = hdev->le_conn_latency;
3699 params->supervision_timeout = hdev->le_supv_timeout;
3700 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3701
3702 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3703
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003704 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003705}
3706
/* This function requires the caller holds hdev->lock.
 *
 * Set the auto-connect policy for addr/addr_type, creating the conn
 * params entry if needed.  The entry is first unlinked from whichever
 * action list (pend_le_conns/pend_le_reports) it was on, then re-added
 * to the list matching the new policy; the background scan is updated
 * whenever list membership may have changed.
 *
 * Returns 0 on success, -EIO if the entry could not be created.
 */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* list_del_init so the entry can be list_add'ed again below */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if none is active yet */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
3747
Johan Hedbergf6c63242014-08-15 21:06:59 +03003748static void hci_conn_params_free(struct hci_conn_params *params)
3749{
3750 if (params->conn) {
3751 hci_conn_drop(params->conn);
3752 hci_conn_put(params->conn);
3753 }
3754
3755 list_del(&params->action);
3756 list_del(&params->list);
3757 kfree(params);
3758}
3759
Andre Guedes15819a72014-02-03 13:56:18 -03003760/* This function requires the caller holds hdev->lock */
3761void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3762{
3763 struct hci_conn_params *params;
3764
3765 params = hci_conn_params_lookup(hdev, addr, addr_type);
3766 if (!params)
3767 return;
3768
Johan Hedbergf6c63242014-08-15 21:06:59 +03003769 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003770
Johan Hedberg95305ba2014-07-04 12:37:21 +03003771 hci_update_background_scan(hdev);
3772
Andre Guedes15819a72014-02-03 13:56:18 -03003773 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3774}
3775
3776/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003777void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003778{
3779 struct hci_conn_params *params, *tmp;
3780
3781 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03003782 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3783 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003784 list_del(&params->list);
3785 kfree(params);
3786 }
3787
Johan Hedberg55af49a82014-07-02 17:37:26 +03003788 BT_DBG("All LE disabled connection parameters were removed");
3789}
3790
/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	/* Safe iteration: hci_conn_params_free() unlinks and frees each
	 * entry while we walk the list.
	 */
	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	/* With no parameter entries left the background scan targets
	 * have changed; re-evaluate the scanning state.
	 */
	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}
3803
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003804static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003805{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003806 if (status) {
3807 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003808
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003809 hci_dev_lock(hdev);
3810 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3811 hci_dev_unlock(hdev);
3812 return;
3813 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003814}
3815
/* Completion callback for the "disable LE scan" request issued by
 * le_scan_disable_work(). For LE-only discovery this ends the
 * discovery session; for interleaved discovery it starts the BR/EDR
 * inquiry phase.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: scanning is off, so discovery is done */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Interleaved discovery: follow the LE phase with a
		 * classic inquiry using the general inquiry LAP.
		 */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale cache entries before the new inquiry */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3858
/* Delayed work handler (hdev->le_scan_disable) that turns off LE
 * scanning. The actual post-processing (stopping discovery or starting
 * the inquiry phase) happens in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3876
Johan Hedberg8d972502014-02-28 12:54:14 +02003877static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3878{
3879 struct hci_dev *hdev = req->hdev;
3880
3881 /* If we're advertising or initiating an LE connection we can't
3882 * go ahead and change the random address at this time. This is
3883 * because the eventual initiator address used for the
3884 * subsequently created connection will be undefined (some
3885 * controllers use the new address and others the one we had
3886 * when the operation started).
3887 *
3888 * In this kind of scenario skip the update and let the random
3889 * address be updated at the next cycle.
3890 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003891 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003892 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3893 BT_DBG("Deferring random address update");
Johan Hedberg9a783a12014-09-12 09:31:52 -07003894 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Johan Hedberg8d972502014-02-28 12:54:14 +02003895 return;
3896 }
3897
3898 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3899}
3900
/* Decide which own-address type to use for an LE operation and, when a
 * random address is needed, queue the command(s) that program it.
 *
 * Priority order: privacy-enabled RPA, then unresolvable private
 * address (when @require_privacy), then static random address, and
 * finally the public address. *own_addr_type is always written; the
 * return value is 0 on success or a negative error from RPA
 * generation.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* RPA still fresh and already programmed: nothing to do.
		 * Note test_and_clear_bit() also consumes the expired
		 * flag when it was set.
		 */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Schedule the next rotation of the RPA */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3970
Johan Hedberga1f4c312014-02-27 14:05:41 +02003971/* Copy the Identity Address of the controller.
3972 *
3973 * If the controller has a public BD_ADDR, then by default use that one.
3974 * If this is a LE only controller without a public address, default to
3975 * the static random address.
3976 *
3977 * For debugging purposes it is possible to force controllers with a
3978 * public address to use the static random address instead.
3979 */
3980void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3981 u8 *bdaddr_type)
3982{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003983 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003984 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3985 bacpy(bdaddr, &hdev->static_addr);
3986 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3987 } else {
3988 bacpy(bdaddr, &hdev->bdaddr);
3989 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3990 }
3991}
3992
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR capabilities and defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01; /* One IAC support is mandatory */
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->manufacturer = 0xffff; /* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE parameters; values are in controller units
	 * (presumably the standard HCI 0.625 ms / 1.25 ms units —
	 * verify against the spec before changing).
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists (keys, filters, pending connections, ...) */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items driving RX/TX/command processing and power-on */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
4069
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
4077
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must provide at least open, close and send callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional; registration failures are tolerated and
	 * simply leave hdev->rfkill NULL.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
4181
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so concurrent users can bail out */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; it is released only after the last put */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal over mgmt for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drop all stored keys, filters and connection parameters */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4249
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; no device state is changed here */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4257
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; no device state is changed here */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4265
Marcel Holtmann75e05692014-11-02 08:15:38 +01004266/* Reset HCI device */
4267int hci_reset_dev(struct hci_dev *hdev)
4268{
4269 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4270 struct sk_buff *skb;
4271
4272 skb = bt_skb_alloc(3, GFP_ATOMIC);
4273 if (!skb)
4274 return -ENOMEM;
4275
4276 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4277 memcpy(skb_put(skb, 3), hw_err, 3);
4278
4279 /* Send Hardware Error to upper stack */
4280 return hci_recv_frame(hdev, skb);
4281}
4282EXPORT_SYMBOL(hci_reset_dev);
4283
Marcel Holtmann76bca882009-11-18 00:40:39 +01004284/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004285int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004286{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004287 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004288 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004289 kfree_skb(skb);
4290 return -ENXIO;
4291 }
4292
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004293 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004294 bt_cb(skb)->incoming = 1;
4295
4296 /* Time stamp */
4297 __net_timestamp(skb);
4298
Marcel Holtmann76bca882009-11-18 00:40:39 +01004299 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004300 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004301
Marcel Holtmann76bca882009-11-18 00:40:39 +01004302 return 0;
4303}
4304EXPORT_SYMBOL(hci_recv_frame);
4305
/* Incrementally reassemble one HCI packet of @type from a byte stream.
 *
 * Partial packets are kept in hdev->reassembly[index] between calls.
 * Up to @count bytes from @data are consumed. Returns the number of
 * input bytes NOT consumed (>= 0), or a negative errno on invalid
 * type/index or allocation failure. When a packet completes it is
 * handed to hci_recv_frame() and the slot is cleared.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Only ACL, SCO and event packets can be reassembled here */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer sized for the
		 * worst case of this packet type and expect its header
		 * first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and extend the expectation accordingly.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4413
/* Reassembly slot used for byte-stream (UART-style) transports */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte-stream fragment into packet reassembly.
 *
 * When no packet is in progress the first byte of @data is taken as
 * the packet-type indicator; otherwise the pending packet's type is
 * reused. Consumes as many complete packets as @count allows and
 * returns the number of unconsumed bytes, or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past the bytes hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4448
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449/* ---- Interface to upper protocols ---- */
4450
/* Register an upper-protocol callback structure on the global list.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4462
/* Remove a previously registered upper-protocol callback structure.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4474
/* Hand one outgoing frame to the driver.
 *
 * The frame is timestamped and mirrored to the monitor interface (and
 * to promiscuous sockets) before being passed to the driver's send
 * callback. On driver failure the skb is freed here.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
4501
Johan Hedberg3119ae92013-03-05 20:37:44 +02004502void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4503{
4504 skb_queue_head_init(&req->cmd_q);
4505 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004506 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004507}
4508
/* Submit a built request for execution.
 *
 * Queues all commands collected in @req onto the device command queue
 * and schedules the command work. @complete is attached to the last
 * command of the request. Returns 0 on success, the builder's recorded
 * error if one occurred, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the final command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4540
Marcel Holtmann899de762014-07-11 05:51:58 +02004541bool hci_req_pending(struct hci_dev *hdev)
4542{
4543 return (hdev->req_status == HCI_REQ_PEND);
4544}
4545
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004546static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004547 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004548{
4549 int len = HCI_COMMAND_HDR_SIZE + plen;
4550 struct hci_command_hdr *hdr;
4551 struct sk_buff *skb;
4552
Linus Torvalds1da177e2005-04-16 15:20:36 -07004553 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004554 if (!skb)
4555 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556
4557 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004558 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559 hdr->plen = plen;
4560
4561 if (plen)
4562 memcpy(skb_put(skb, plen), param, plen);
4563
4564 BT_DBG("skb len %d", skb->len);
4565
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004566 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004567 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004568
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004569 return skb;
4570}
4571
/* Send HCI command */
/* Build a single stand-alone HCI command and queue it for
 * transmission by the command worker.
 *
 * Returns 0 on success or -ENOMEM if the command skb could not be
 * allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	/* Queue the command and let cmd_work push it to the driver */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596
/* Queue a command to an asynchronous HCI request */
/* Build one HCI command and append it to the request's private queue.
 * On allocation failure the error is latched in req->err so that
 * hci_req_run() can purge and abort the whole request.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	/* Stored per-command; presumably the event expected to complete
	 * this command — confirm against the request completion path.
	 */
	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
4627
/* Queue a command to an asynchronous HCI request with no special
 * completion event (event = 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
4633
Linus Torvalds1da177e2005-04-16 15:20:36 -07004634/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004635void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636{
4637 struct hci_command_hdr *hdr;
4638
4639 if (!hdev->sent_cmd)
4640 return NULL;
4641
4642 hdr = (void *) hdev->sent_cmd->data;
4643
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004644 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004645 return NULL;
4646
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004647 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648
4649 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4650}
4651
/* Send ACL data */
/* Prepend an ACL data header (handle + flags, payload length) to the
 * given skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	/* Make room for the header in front of the payload */
	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Handle and packet-boundary/broadcast flags share one 16-bit field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4664
/* Add ACL headers to an outgoing skb and all of its fragments and
 * append them to the given channel queue.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are
	 * handled individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* BR/EDR uses the connection handle, AMP the channel handle */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4726
/* Queue outgoing ACL data on the channel and schedule the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737
/* Send SCO data */
/* Prepend the SCO header, queue the packet on the connection and
 * schedule the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Make room for and copy in the SCO header */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004758
4759/* ---- HCI TX task (outgoing data) ---- */
4760
4761/* HCI Connection scheduler */
/* Pick the connection of the given link type that has queued data and
 * the fewest packets in flight, and compute its fair share of the
 * available controller buffers into *quote.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the least outstanding data */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Select the buffer budget matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the budget evenly, but grant at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4821
/* TX timeout handling for one link type: disconnect every connection
 * of that type that still has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4842
/* Pick the channel to service next for the given link type: among the
 * channels whose head packet has the highest priority, choose the one
 * on the connection with the least data in flight, and compute its
 * share of the available buffers into *quote.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority counts */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the search */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Within equal priority, prefer the channel on the
			 * connection with the least outstanding data.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the buffer budget matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the budget evenly, but grant at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4924
/* Anti-starvation pass after a scheduling round: reset the per-round
 * sent counter of channels that were serviced, and promote the head
 * packet of channels that were not to priority HCI_PRIO_MAX - 1 so
 * they win the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced; just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the maximum promotable priority */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4974
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004975static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4976{
4977 /* Calculate count of blocks used by this packet */
4978 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4979}
4980
/* Detect a stalled ACL link: if no TX credits remain (@cnt == 0) and
 * nothing was sent for HCI_ACL_TX_TIMEOUT, kill stalled connections.
 * Skipped for unconfigured controllers.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991
/* Packet-based ACL scheduler: drain channel queues in priority order
 * while ACL TX credits are available.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One packet consumes one ACL credit */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance priorities against starvation */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
5029
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005030static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005031{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005032 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005033 struct hci_chan *chan;
5034 struct sk_buff *skb;
5035 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005036 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005037
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005038 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005039
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005040 BT_DBG("%s", hdev->name);
5041
5042 if (hdev->dev_type == HCI_AMP)
5043 type = AMP_LINK;
5044 else
5045 type = ACL_LINK;
5046
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005047 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005048 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005049 u32 priority = (skb_peek(&chan->data_q))->priority;
5050 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5051 int blocks;
5052
5053 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005054 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005055
5056 /* Stop if priority has changed */
5057 if (skb->priority < priority)
5058 break;
5059
5060 skb = skb_dequeue(&chan->data_q);
5061
5062 blocks = __get_blocks(hdev, skb);
5063 if (blocks > hdev->block_cnt)
5064 return;
5065
5066 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005067 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005068
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005069 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005070 hdev->acl_last_tx = jiffies;
5071
5072 hdev->block_cnt -= blocks;
5073 quote -= blocks;
5074
5075 chan->sent += blocks;
5076 chan->conn->sent += blocks;
5077 }
5078 }
5079
5080 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005081 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005082}
5083
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005084static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005085{
5086 BT_DBG("%s", hdev->name);
5087
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005088 /* No ACL link over BR/EDR controller */
5089 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5090 return;
5091
5092 /* No AMP link over AMP controller */
5093 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005094 return;
5095
5096 switch (hdev->flow_ctl_mode) {
5097 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5098 hci_sched_acl_pkt(hdev);
5099 break;
5100
5101 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5102 hci_sched_acl_blk(hdev);
5103 break;
5104 }
5105}
5106
/* Schedule SCO */
/* Drain queued SCO packets from the least-busy SCO connections while
 * SCO credits are available.
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection counter at ~0 */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
5130
/* eSCO scheduler: same strategy as hci_sched_sco() but for eSCO links
 * (shares the SCO credit pool).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection counter at ~0 */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
5154
/* LE scheduler: drain LE channel queues using the dedicated LE buffer
 * pool, or the ACL pool when the controller reports no LE buffers.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Borrow from the ACL pool when there are no dedicated LE buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance priorities against starvation */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
5205
/* TX worker: run the per-link-type schedulers and then flush any raw
 * (unknown type) packets straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Skip the schedulers while the device is in user channel mode */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
5226
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005227/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228
/* ACL data packet */
/* RX path: strip the ACL header, look up the owning connection and
 * hand the payload to L2CAP; drop packets for unknown handles.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field also carries the packet flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
5264
/* SCO data packet */
/* RX path: strip the SCO header, look up the owning connection and
 * hand the payload to the SCO layer; drop packets for unknown handles.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
5295
Johan Hedberg9238f362013-03-05 20:37:48 +02005296static bool hci_req_is_complete(struct hci_dev *hdev)
5297{
5298 struct sk_buff *skb;
5299
5300 skb = skb_peek(&hdev->cmd_q);
5301 if (!skb)
5302 return true;
5303
5304 return bt_cb(skb)->req.start;
5305}
5306
Johan Hedberg42c6b122013-03-05 20:37:49 +02005307static void hci_resend_last(struct hci_dev *hdev)
5308{
5309 struct hci_command_hdr *sent;
5310 struct sk_buff *skb;
5311 u16 opcode;
5312
5313 if (!hdev->sent_cmd)
5314 return;
5315
5316 sent = (void *) hdev->sent_cmd->data;
5317 opcode = __le16_to_cpu(sent->opcode);
5318 if (opcode == HCI_OP_RESET)
5319 return;
5320
5321 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5322 if (!skb)
5323 return;
5324
5325 skb_queue_head(&hdev->cmd_q, skb);
5326 queue_work(hdev->workqueue, &hdev->cmd_work);
5327}
5328
/* Handle completion of an HCI command that was sent as part of an
 * hci_request. Decides whether the whole request is now finished and,
 * if so, runs the request's completion callback exactly once with the
 * given status.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request,
	 * stopping at the first command that starts a new request.
	 * The last dequeued skb carries the request's completion
	 * callback (if any).
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Start of the next request; put it back. */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5394
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005395static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005396{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005397 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005398 struct sk_buff *skb;
5399
5400 BT_DBG("%s", hdev->name);
5401
Linus Torvalds1da177e2005-04-16 15:20:36 -07005402 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005403 /* Send copy to monitor */
5404 hci_send_to_monitor(hdev, skb);
5405
Linus Torvalds1da177e2005-04-16 15:20:36 -07005406 if (atomic_read(&hdev->promisc)) {
5407 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005408 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005409 }
5410
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005411 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005412 kfree_skb(skb);
5413 continue;
5414 }
5415
5416 if (test_bit(HCI_INIT, &hdev->flags)) {
5417 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005418 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419 case HCI_ACLDATA_PKT:
5420 case HCI_SCODATA_PKT:
5421 kfree_skb(skb);
5422 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005423 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424 }
5425
5426 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005427 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005429 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430 hci_event_packet(hdev, skb);
5431 break;
5432
5433 case HCI_ACLDATA_PKT:
5434 BT_DBG("%s ACL data packet", hdev->name);
5435 hci_acldata_packet(hdev, skb);
5436 break;
5437
5438 case HCI_SCODATA_PKT:
5439 BT_DBG("%s SCO data packet", hdev->name);
5440 hci_scodata_packet(hdev, skb);
5441 break;
5442
5443 default:
5444 kfree_skb(skb);
5445 break;
5446 }
5447 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005448}
5449
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005450static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005451{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005452 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005453 struct sk_buff *skb;
5454
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005455 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5456 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005457
Linus Torvalds1da177e2005-04-16 15:20:36 -07005458 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005459 if (atomic_read(&hdev->cmd_cnt)) {
5460 skb = skb_dequeue(&hdev->cmd_q);
5461 if (!skb)
5462 return;
5463
Wei Yongjun7585b972009-02-25 18:29:52 +08005464 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005466 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005467 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005468 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005469 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005470 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005471 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005472 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005473 schedule_delayed_work(&hdev->cmd_timer,
5474 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005475 } else {
5476 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005477 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005478 }
5479 }
5480}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005481
5482void hci_req_add_le_scan_disable(struct hci_request *req)
5483{
5484 struct hci_cp_le_set_scan_enable cp;
5485
5486 memset(&cp, 0, sizeof(cp));
5487 cp.enable = LE_SCAN_DISABLE;
5488 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5489}
Andre Guedesa4790db2014-02-26 20:21:47 -03005490
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005491static void add_to_white_list(struct hci_request *req,
5492 struct hci_conn_params *params)
5493{
5494 struct hci_cp_le_add_to_white_list cp;
5495
5496 cp.bdaddr_type = params->addr_type;
5497 bacpy(&cp.bdaddr, &params->addr);
5498
5499 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5500}
5501
5502static u8 update_white_list(struct hci_request *req)
5503{
5504 struct hci_dev *hdev = req->hdev;
5505 struct hci_conn_params *params;
5506 struct bdaddr_list *b;
5507 uint8_t white_list_entries = 0;
5508
5509 /* Go through the current white list programmed into the
5510 * controller one by one and check if that address is still
5511 * in the list of pending connections or list of devices to
5512 * report. If not present in either list, then queue the
5513 * command to remove it from the controller.
5514 */
5515 list_for_each_entry(b, &hdev->le_white_list, list) {
5516 struct hci_cp_le_del_from_white_list cp;
5517
5518 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5519 &b->bdaddr, b->bdaddr_type) ||
5520 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5521 &b->bdaddr, b->bdaddr_type)) {
5522 white_list_entries++;
5523 continue;
5524 }
5525
5526 cp.bdaddr_type = b->bdaddr_type;
5527 bacpy(&cp.bdaddr, &b->bdaddr);
5528
5529 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5530 sizeof(cp), &cp);
5531 }
5532
5533 /* Since all no longer valid white list entries have been
5534 * removed, walk through the list of pending connections
5535 * and ensure that any new device gets programmed into
5536 * the controller.
5537 *
5538 * If the list of the devices is larger than the list of
5539 * available white list entries in the controller, then
5540 * just abort and return filer policy value to not use the
5541 * white list.
5542 */
5543 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5544 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5545 &params->addr, params->addr_type))
5546 continue;
5547
5548 if (white_list_entries >= hdev->le_white_list_size) {
5549 /* Select filter policy to accept all advertising */
5550 return 0x00;
5551 }
5552
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005553 if (hci_find_irk_by_addr(hdev, &params->addr,
5554 params->addr_type)) {
5555 /* White list can not be used with RPAs */
5556 return 0x00;
5557 }
5558
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005559 white_list_entries++;
5560 add_to_white_list(req, params);
5561 }
5562
5563 /* After adding all new pending connections, walk through
5564 * the list of pending reports and also add these to the
5565 * white list if there is still space.
5566 */
5567 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5568 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5569 &params->addr, params->addr_type))
5570 continue;
5571
5572 if (white_list_entries >= hdev->le_white_list_size) {
5573 /* Select filter policy to accept all advertising */
5574 return 0x00;
5575 }
5576
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005577 if (hci_find_irk_by_addr(hdev, &params->addr,
5578 params->addr_type)) {
5579 /* White list can not be used with RPAs */
5580 return 0x00;
5581 }
5582
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005583 white_list_entries++;
5584 add_to_white_list(req, params);
5585 }
5586
5587 /* Select filter policy to use white list */
5588 return 0x01;
5589}
5590
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005591void hci_req_add_le_passive_scan(struct hci_request *req)
5592{
5593 struct hci_cp_le_set_scan_param param_cp;
5594 struct hci_cp_le_set_scan_enable enable_cp;
5595 struct hci_dev *hdev = req->hdev;
5596 u8 own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005597 u8 filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005598
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005599 /* Set require_privacy to false since no SCAN_REQ are send
5600 * during passive scanning. Not using an unresolvable address
5601 * here is important so that peer devices using direct
5602 * advertising with our address will be correctly reported
5603 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005604 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005605 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005606 return;
5607
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005608 /* Adding or removing entries from the white list must
5609 * happen before enabling scanning. The controller does
5610 * not allow white list modification while scanning.
5611 */
5612 filter_policy = update_white_list(req);
5613
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005614 memset(&param_cp, 0, sizeof(param_cp));
5615 param_cp.type = LE_SCAN_PASSIVE;
5616 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5617 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5618 param_cp.own_address_type = own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005619 param_cp.filter_policy = filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005620 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5621 &param_cp);
5622
5623 memset(&enable_cp, 0, sizeof(enable_cp));
5624 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005625 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005626 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5627 &enable_cp);
5628}
5629
Andre Guedesa4790db2014-02-26 20:21:47 -03005630static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5631{
5632 if (status)
5633 BT_DBG("HCI request failed to update background scanning: "
5634 "status 0x%2.2x", status);
5635}
5636
5637/* This function controls the background scanning based on hdev->pend_le_conns
5638 * list. If there are pending LE connection we start the background scanning,
5639 * otherwise we stop it.
5640 *
5641 * This function requires the caller holds hdev->lock.
5642 */
5643void hci_update_background_scan(struct hci_dev *hdev)
5644{
Andre Guedesa4790db2014-02-26 20:21:47 -03005645 struct hci_request req;
5646 struct hci_conn *conn;
5647 int err;
5648
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005649 if (!test_bit(HCI_UP, &hdev->flags) ||
5650 test_bit(HCI_INIT, &hdev->flags) ||
5651 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02005652 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005653 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005654 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005655 return;
5656
Johan Hedberga70f4b52014-07-07 15:19:50 +03005657 /* No point in doing scanning if LE support hasn't been enabled */
5658 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5659 return;
5660
Johan Hedbergae23ada2014-07-07 13:24:59 +03005661 /* If discovery is active don't interfere with it */
5662 if (hdev->discovery.state != DISCOVERY_STOPPED)
5663 return;
5664
Andre Guedesa4790db2014-02-26 20:21:47 -03005665 hci_req_init(&req, hdev);
5666
Johan Hedbergd1d588c2014-07-20 17:10:45 +03005667 if (list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005668 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005669 /* If there is no pending LE connections or devices
5670 * to be scanned for, we should stop the background
5671 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005672 */
5673
5674 /* If controller is not scanning we are done. */
5675 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5676 return;
5677
5678 hci_req_add_le_scan_disable(&req);
5679
5680 BT_DBG("%s stopping background scanning", hdev->name);
5681 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005682 /* If there is at least one pending LE connection, we should
5683 * keep the background scan running.
5684 */
5685
Andre Guedesa4790db2014-02-26 20:21:47 -03005686 /* If controller is connecting, we should not start scanning
5687 * since some controllers are not able to scan and connect at
5688 * the same time.
5689 */
5690 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5691 if (conn)
5692 return;
5693
Andre Guedes4340a122014-03-10 18:26:24 -03005694 /* If controller is currently scanning, we stop it to ensure we
5695 * don't miss any advertising (due to duplicates filter).
5696 */
5697 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5698 hci_req_add_le_scan_disable(&req);
5699
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005700 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005701
5702 BT_DBG("%s starting background scanning", hdev->name);
5703 }
5704
5705 err = hci_req_run(&req, update_background_scan_complete);
5706 if (err)
5707 BT_ERR("Failed to run HCI request: err %d", err);
5708}
Johan Hedberg432df052014-08-01 11:13:31 +03005709
Johan Hedberg22f433d2014-08-01 11:13:32 +03005710static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5711{
5712 struct bdaddr_list *b;
5713
5714 list_for_each_entry(b, &hdev->whitelist, list) {
5715 struct hci_conn *conn;
5716
5717 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5718 if (!conn)
5719 return true;
5720
5721 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5722 return true;
5723 }
5724
5725 return false;
5726}
5727
Johan Hedberg432df052014-08-01 11:13:31 +03005728void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5729{
5730 u8 scan;
5731
5732 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5733 return;
5734
5735 if (!hdev_is_powered(hdev))
5736 return;
5737
5738 if (mgmt_powering_down(hdev))
5739 return;
5740
5741 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
Johan Hedberg22f433d2014-08-01 11:13:32 +03005742 disconnected_whitelist_entries(hdev))
Johan Hedberg432df052014-08-01 11:13:31 +03005743 scan = SCAN_PAGE;
5744 else
5745 scan = SCAN_DISABLED;
5746
5747 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5748 return;
5749
5750 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5751 scan |= SCAN_INQUIRY;
5752
5753 if (req)
5754 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5755 else
5756 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5757}