blob: 067526d9680de91bfd5921fa539b0fc287f14af1 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg970c4e42014-02-18 10:19:33 +020040#include "smp.h"
41
Marcel Holtmannb78752c2010-08-08 23:06:53 -040042static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020043static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020044static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046/* HCI device list */
47LIST_HEAD(hci_dev_list);
48DEFINE_RWLOCK(hci_dev_list_lock);
49
50/* HCI callback list */
51LIST_HEAD(hci_cb_list);
52DEFINE_RWLOCK(hci_cb_list_lock);
53
Sasha Levin3df92b32012-05-27 22:36:56 +020054/* HCI ID Numbering */
55static DEFINE_IDA(hci_index_ida);
56
Marcel Holtmann899de762014-07-11 05:51:58 +020057/* ----- HCI requests ----- */
58
59#define HCI_REQ_DONE 0
60#define HCI_REQ_PEND 1
61#define HCI_REQ_CANCELED 2
62
63#define hci_req_lock(d) mutex_lock(&d->req_lock)
64#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
65
Linus Torvalds1da177e2005-04-16 15:20:36 -070066/* ---- HCI notifications ---- */
67
/* Forward a device event (register/unregister/up/down) to the HCI
 * socket layer so monitoring sockets see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}
72
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070073/* ---- HCI debugfs entries ---- */
74
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070075static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
77{
78 struct hci_dev *hdev = file->private_data;
79 char buf[3];
80
Marcel Holtmann111902f2014-06-21 04:53:17 +020081 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070082 buf[1] = '\n';
83 buf[2] = '\0';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85}
86
87static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
89{
90 struct hci_dev *hdev = file->private_data;
91 struct sk_buff *skb;
92 char buf[32];
93 size_t buf_size = min(count, (sizeof(buf)-1));
94 bool enable;
95 int err;
96
97 if (!test_bit(HCI_UP, &hdev->flags))
98 return -ENETDOWN;
99
100 if (copy_from_user(buf, user_buf, buf_size))
101 return -EFAULT;
102
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
105 return -EINVAL;
106
Marcel Holtmann111902f2014-06-21 04:53:17 +0200107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700108 return -EALREADY;
109
110 hci_req_lock(hdev);
111 if (enable)
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113 HCI_CMD_TIMEOUT);
114 else
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116 HCI_CMD_TIMEOUT);
117 hci_req_unlock(hdev);
118
119 if (IS_ERR(skb))
120 return PTR_ERR(skb);
121
122 err = -bt_to_errno(skb->data[0]);
123 kfree_skb(skb);
124
125 if (err < 0)
126 return err;
127
Marcel Holtmann111902f2014-06-21 04:53:17 +0200128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700129
130 return count;
131}
132
133static const struct file_operations dut_mode_fops = {
134 .open = simple_open,
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
138};
139
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700140static int features_show(struct seq_file *f, void *ptr)
141{
142 struct hci_dev *hdev = f->private;
143 u8 p;
144
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
153 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700161 hci_dev_unlock(hdev);
162
163 return 0;
164}
165
166static int features_open(struct inode *inode, struct file *file)
167{
168 return single_open(file, features_show, inode->i_private);
169}
170
171static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
176};
177
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700178static int blacklist_show(struct seq_file *f, void *p)
179{
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
182
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700186 hci_dev_unlock(hdev);
187
188 return 0;
189}
190
191static int blacklist_open(struct inode *inode, struct file *file)
192{
193 return single_open(file, blacklist_show, inode->i_private);
194}
195
196static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
Johan Hedberg66593582014-07-09 12:59:14 +0300203static int whitelist_show(struct seq_file *f, void *p)
204{
205 struct hci_dev *hdev = f->private;
206 struct bdaddr_list *b;
207
208 hci_dev_lock(hdev);
209 list_for_each_entry(b, &hdev->whitelist, list)
210 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211 hci_dev_unlock(hdev);
212
213 return 0;
214}
215
216static int whitelist_open(struct inode *inode, struct file *file)
217{
218 return single_open(file, whitelist_show, inode->i_private);
219}
220
221static const struct file_operations whitelist_fops = {
222 .open = whitelist_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
226};
227
Marcel Holtmann47219832013-10-17 17:24:15 -0700228static int uuids_show(struct seq_file *f, void *p)
229{
230 struct hci_dev *hdev = f->private;
231 struct bt_uuid *uuid;
232
233 hci_dev_lock(hdev);
234 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700235 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700236
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700237 /* The Bluetooth UUID values are stored in big endian,
238 * but with reversed byte order. So convert them into
239 * the right order for the %pUb modifier.
240 */
241 for (i = 0; i < 16; i++)
242 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700243
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700244 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700245 }
246 hci_dev_unlock(hdev);
247
248 return 0;
249}
250
251static int uuids_open(struct inode *inode, struct file *file)
252{
253 return single_open(file, uuids_show, inode->i_private);
254}
255
256static const struct file_operations uuids_fops = {
257 .open = uuids_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
261};
262
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700263static int inquiry_cache_show(struct seq_file *f, void *p)
264{
265 struct hci_dev *hdev = f->private;
266 struct discovery_state *cache = &hdev->discovery;
267 struct inquiry_entry *e;
268
269 hci_dev_lock(hdev);
270
271 list_for_each_entry(e, &cache->all, all) {
272 struct inquiry_data *data = &e->data;
273 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
274 &data->bdaddr,
275 data->pscan_rep_mode, data->pscan_period_mode,
276 data->pscan_mode, data->dev_class[2],
277 data->dev_class[1], data->dev_class[0],
278 __le16_to_cpu(data->clock_offset),
279 data->rssi, data->ssp_mode, e->timestamp);
280 }
281
282 hci_dev_unlock(hdev);
283
284 return 0;
285}
286
287static int inquiry_cache_open(struct inode *inode, struct file *file)
288{
289 return single_open(file, inquiry_cache_show, inode->i_private);
290}
291
292static const struct file_operations inquiry_cache_fops = {
293 .open = inquiry_cache_open,
294 .read = seq_read,
295 .llseek = seq_lseek,
296 .release = single_release,
297};
298
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700299static int link_keys_show(struct seq_file *f, void *ptr)
300{
301 struct hci_dev *hdev = f->private;
302 struct list_head *p, *n;
303
304 hci_dev_lock(hdev);
305 list_for_each_safe(p, n, &hdev->link_keys) {
306 struct link_key *key = list_entry(p, struct link_key, list);
307 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
309 }
310 hci_dev_unlock(hdev);
311
312 return 0;
313}
314
315static int link_keys_open(struct inode *inode, struct file *file)
316{
317 return single_open(file, link_keys_show, inode->i_private);
318}
319
320static const struct file_operations link_keys_fops = {
321 .open = link_keys_open,
322 .read = seq_read,
323 .llseek = seq_lseek,
324 .release = single_release,
325};
326
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700327static int dev_class_show(struct seq_file *f, void *ptr)
328{
329 struct hci_dev *hdev = f->private;
330
331 hci_dev_lock(hdev);
332 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
333 hdev->dev_class[1], hdev->dev_class[0]);
334 hci_dev_unlock(hdev);
335
336 return 0;
337}
338
339static int dev_class_open(struct inode *inode, struct file *file)
340{
341 return single_open(file, dev_class_show, inode->i_private);
342}
343
344static const struct file_operations dev_class_fops = {
345 .open = dev_class_open,
346 .read = seq_read,
347 .llseek = seq_lseek,
348 .release = single_release,
349};
350
Marcel Holtmann041000b2013-10-17 12:02:31 -0700351static int voice_setting_get(void *data, u64 *val)
352{
353 struct hci_dev *hdev = data;
354
355 hci_dev_lock(hdev);
356 *val = hdev->voice_setting;
357 hci_dev_unlock(hdev);
358
359 return 0;
360}
361
362DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
363 NULL, "0x%4.4llx\n");
364
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700365static int auto_accept_delay_set(void *data, u64 val)
366{
367 struct hci_dev *hdev = data;
368
369 hci_dev_lock(hdev);
370 hdev->auto_accept_delay = val;
371 hci_dev_unlock(hdev);
372
373 return 0;
374}
375
376static int auto_accept_delay_get(void *data, u64 *val)
377{
378 struct hci_dev *hdev = data;
379
380 hci_dev_lock(hdev);
381 *val = hdev->auto_accept_delay;
382 hci_dev_unlock(hdev);
383
384 return 0;
385}
386
387DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388 auto_accept_delay_set, "%llu\n");
389
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800390static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391 size_t count, loff_t *ppos)
392{
393 struct hci_dev *hdev = file->private_data;
394 char buf[3];
395
Marcel Holtmann111902f2014-06-21 04:53:17 +0200396 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800397 buf[1] = '\n';
398 buf[2] = '\0';
399 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
400}
401
402static ssize_t force_sc_support_write(struct file *file,
403 const char __user *user_buf,
404 size_t count, loff_t *ppos)
405{
406 struct hci_dev *hdev = file->private_data;
407 char buf[32];
408 size_t buf_size = min(count, (sizeof(buf)-1));
409 bool enable;
410
411 if (test_bit(HCI_UP, &hdev->flags))
412 return -EBUSY;
413
414 if (copy_from_user(buf, user_buf, buf_size))
415 return -EFAULT;
416
417 buf[buf_size] = '\0';
418 if (strtobool(buf, &enable))
419 return -EINVAL;
420
Marcel Holtmann111902f2014-06-21 04:53:17 +0200421 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800422 return -EALREADY;
423
Marcel Holtmann111902f2014-06-21 04:53:17 +0200424 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800425
426 return count;
427}
428
429static const struct file_operations force_sc_support_fops = {
430 .open = simple_open,
431 .read = force_sc_support_read,
432 .write = force_sc_support_write,
433 .llseek = default_llseek,
434};
435
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800436static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437 size_t count, loff_t *ppos)
438{
439 struct hci_dev *hdev = file->private_data;
440 char buf[3];
441
442 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
443 buf[1] = '\n';
444 buf[2] = '\0';
445 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
446}
447
448static const struct file_operations sc_only_mode_fops = {
449 .open = simple_open,
450 .read = sc_only_mode_read,
451 .llseek = default_llseek,
452};
453
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700454static int idle_timeout_set(void *data, u64 val)
455{
456 struct hci_dev *hdev = data;
457
458 if (val != 0 && (val < 500 || val > 3600000))
459 return -EINVAL;
460
461 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700462 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700463 hci_dev_unlock(hdev);
464
465 return 0;
466}
467
468static int idle_timeout_get(void *data, u64 *val)
469{
470 struct hci_dev *hdev = data;
471
472 hci_dev_lock(hdev);
473 *val = hdev->idle_timeout;
474 hci_dev_unlock(hdev);
475
476 return 0;
477}
478
479DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480 idle_timeout_set, "%llu\n");
481
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200482static int rpa_timeout_set(void *data, u64 val)
483{
484 struct hci_dev *hdev = data;
485
486 /* Require the RPA timeout to be at least 30 seconds and at most
487 * 24 hours.
488 */
489 if (val < 30 || val > (60 * 60 * 24))
490 return -EINVAL;
491
492 hci_dev_lock(hdev);
493 hdev->rpa_timeout = val;
494 hci_dev_unlock(hdev);
495
496 return 0;
497}
498
499static int rpa_timeout_get(void *data, u64 *val)
500{
501 struct hci_dev *hdev = data;
502
503 hci_dev_lock(hdev);
504 *val = hdev->rpa_timeout;
505 hci_dev_unlock(hdev);
506
507 return 0;
508}
509
510DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511 rpa_timeout_set, "%llu\n");
512
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700513static int sniff_min_interval_set(void *data, u64 val)
514{
515 struct hci_dev *hdev = data;
516
517 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
518 return -EINVAL;
519
520 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700521 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700522 hci_dev_unlock(hdev);
523
524 return 0;
525}
526
527static int sniff_min_interval_get(void *data, u64 *val)
528{
529 struct hci_dev *hdev = data;
530
531 hci_dev_lock(hdev);
532 *val = hdev->sniff_min_interval;
533 hci_dev_unlock(hdev);
534
535 return 0;
536}
537
538DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539 sniff_min_interval_set, "%llu\n");
540
541static int sniff_max_interval_set(void *data, u64 val)
542{
543 struct hci_dev *hdev = data;
544
545 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
546 return -EINVAL;
547
548 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700549 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700550 hci_dev_unlock(hdev);
551
552 return 0;
553}
554
555static int sniff_max_interval_get(void *data, u64 *val)
556{
557 struct hci_dev *hdev = data;
558
559 hci_dev_lock(hdev);
560 *val = hdev->sniff_max_interval;
561 hci_dev_unlock(hdev);
562
563 return 0;
564}
565
566DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567 sniff_max_interval_set, "%llu\n");
568
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200569static int conn_info_min_age_set(void *data, u64 val)
570{
571 struct hci_dev *hdev = data;
572
573 if (val == 0 || val > hdev->conn_info_max_age)
574 return -EINVAL;
575
576 hci_dev_lock(hdev);
577 hdev->conn_info_min_age = val;
578 hci_dev_unlock(hdev);
579
580 return 0;
581}
582
583static int conn_info_min_age_get(void *data, u64 *val)
584{
585 struct hci_dev *hdev = data;
586
587 hci_dev_lock(hdev);
588 *val = hdev->conn_info_min_age;
589 hci_dev_unlock(hdev);
590
591 return 0;
592}
593
594DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595 conn_info_min_age_set, "%llu\n");
596
597static int conn_info_max_age_set(void *data, u64 val)
598{
599 struct hci_dev *hdev = data;
600
601 if (val == 0 || val < hdev->conn_info_min_age)
602 return -EINVAL;
603
604 hci_dev_lock(hdev);
605 hdev->conn_info_max_age = val;
606 hci_dev_unlock(hdev);
607
608 return 0;
609}
610
611static int conn_info_max_age_get(void *data, u64 *val)
612{
613 struct hci_dev *hdev = data;
614
615 hci_dev_lock(hdev);
616 *val = hdev->conn_info_max_age;
617 hci_dev_unlock(hdev);
618
619 return 0;
620}
621
622DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623 conn_info_max_age_set, "%llu\n");
624
Marcel Holtmannac345812014-02-23 12:44:25 -0800625static int identity_show(struct seq_file *f, void *p)
626{
627 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200628 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800629 u8 addr_type;
630
631 hci_dev_lock(hdev);
632
Johan Hedberga1f4c312014-02-27 14:05:41 +0200633 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800634
Johan Hedberga1f4c312014-02-27 14:05:41 +0200635 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800636 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800637
638 hci_dev_unlock(hdev);
639
640 return 0;
641}
642
643static int identity_open(struct inode *inode, struct file *file)
644{
645 return single_open(file, identity_show, inode->i_private);
646}
647
648static const struct file_operations identity_fops = {
649 .open = identity_open,
650 .read = seq_read,
651 .llseek = seq_lseek,
652 .release = single_release,
653};
654
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800655static int random_address_show(struct seq_file *f, void *p)
656{
657 struct hci_dev *hdev = f->private;
658
659 hci_dev_lock(hdev);
660 seq_printf(f, "%pMR\n", &hdev->random_addr);
661 hci_dev_unlock(hdev);
662
663 return 0;
664}
665
666static int random_address_open(struct inode *inode, struct file *file)
667{
668 return single_open(file, random_address_show, inode->i_private);
669}
670
671static const struct file_operations random_address_fops = {
672 .open = random_address_open,
673 .read = seq_read,
674 .llseek = seq_lseek,
675 .release = single_release,
676};
677
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700678static int static_address_show(struct seq_file *f, void *p)
679{
680 struct hci_dev *hdev = f->private;
681
682 hci_dev_lock(hdev);
683 seq_printf(f, "%pMR\n", &hdev->static_addr);
684 hci_dev_unlock(hdev);
685
686 return 0;
687}
688
689static int static_address_open(struct inode *inode, struct file *file)
690{
691 return single_open(file, static_address_show, inode->i_private);
692}
693
694static const struct file_operations static_address_fops = {
695 .open = static_address_open,
696 .read = seq_read,
697 .llseek = seq_lseek,
698 .release = single_release,
699};
700
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800701static ssize_t force_static_address_read(struct file *file,
702 char __user *user_buf,
703 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700704{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800705 struct hci_dev *hdev = file->private_data;
706 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700707
Marcel Holtmann111902f2014-06-21 04:53:17 +0200708 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800709 buf[1] = '\n';
710 buf[2] = '\0';
711 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
712}
713
714static ssize_t force_static_address_write(struct file *file,
715 const char __user *user_buf,
716 size_t count, loff_t *ppos)
717{
718 struct hci_dev *hdev = file->private_data;
719 char buf[32];
720 size_t buf_size = min(count, (sizeof(buf)-1));
721 bool enable;
722
723 if (test_bit(HCI_UP, &hdev->flags))
724 return -EBUSY;
725
726 if (copy_from_user(buf, user_buf, buf_size))
727 return -EFAULT;
728
729 buf[buf_size] = '\0';
730 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700731 return -EINVAL;
732
Marcel Holtmann111902f2014-06-21 04:53:17 +0200733 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800734 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700735
Marcel Holtmann111902f2014-06-21 04:53:17 +0200736 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800737
738 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700739}
740
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800741static const struct file_operations force_static_address_fops = {
742 .open = simple_open,
743 .read = force_static_address_read,
744 .write = force_static_address_write,
745 .llseek = default_llseek,
746};
Marcel Holtmann92202182013-10-18 16:38:10 -0700747
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800748static int white_list_show(struct seq_file *f, void *ptr)
749{
750 struct hci_dev *hdev = f->private;
751 struct bdaddr_list *b;
752
753 hci_dev_lock(hdev);
754 list_for_each_entry(b, &hdev->le_white_list, list)
755 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756 hci_dev_unlock(hdev);
757
758 return 0;
759}
760
761static int white_list_open(struct inode *inode, struct file *file)
762{
763 return single_open(file, white_list_show, inode->i_private);
764}
765
766static const struct file_operations white_list_fops = {
767 .open = white_list_open,
768 .read = seq_read,
769 .llseek = seq_lseek,
770 .release = single_release,
771};
772
Marcel Holtmann3698d702014-02-18 21:54:49 -0800773static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
774{
775 struct hci_dev *hdev = f->private;
776 struct list_head *p, *n;
777
778 hci_dev_lock(hdev);
779 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782 &irk->bdaddr, irk->addr_type,
783 16, irk->val, &irk->rpa);
784 }
785 hci_dev_unlock(hdev);
786
787 return 0;
788}
789
790static int identity_resolving_keys_open(struct inode *inode, struct file *file)
791{
792 return single_open(file, identity_resolving_keys_show,
793 inode->i_private);
794}
795
796static const struct file_operations identity_resolving_keys_fops = {
797 .open = identity_resolving_keys_open,
798 .read = seq_read,
799 .llseek = seq_lseek,
800 .release = single_release,
801};
802
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700803static int long_term_keys_show(struct seq_file *f, void *ptr)
804{
805 struct hci_dev *hdev = f->private;
806 struct list_head *p, *n;
807
808 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800809 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700810 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800811 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700812 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800814 __le64_to_cpu(ltk->rand), 16, ltk->val);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700815 }
816 hci_dev_unlock(hdev);
817
818 return 0;
819}
820
821static int long_term_keys_open(struct inode *inode, struct file *file)
822{
823 return single_open(file, long_term_keys_show, inode->i_private);
824}
825
826static const struct file_operations long_term_keys_fops = {
827 .open = long_term_keys_open,
828 .read = seq_read,
829 .llseek = seq_lseek,
830 .release = single_release,
831};
832
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700833static int conn_min_interval_set(void *data, u64 val)
834{
835 struct hci_dev *hdev = data;
836
837 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
838 return -EINVAL;
839
840 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700841 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700842 hci_dev_unlock(hdev);
843
844 return 0;
845}
846
847static int conn_min_interval_get(void *data, u64 *val)
848{
849 struct hci_dev *hdev = data;
850
851 hci_dev_lock(hdev);
852 *val = hdev->le_conn_min_interval;
853 hci_dev_unlock(hdev);
854
855 return 0;
856}
857
858DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859 conn_min_interval_set, "%llu\n");
860
861static int conn_max_interval_set(void *data, u64 val)
862{
863 struct hci_dev *hdev = data;
864
865 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
866 return -EINVAL;
867
868 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700869 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700870 hci_dev_unlock(hdev);
871
872 return 0;
873}
874
875static int conn_max_interval_get(void *data, u64 *val)
876{
877 struct hci_dev *hdev = data;
878
879 hci_dev_lock(hdev);
880 *val = hdev->le_conn_max_interval;
881 hci_dev_unlock(hdev);
882
883 return 0;
884}
885
886DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887 conn_max_interval_set, "%llu\n");
888
Marcel Holtmann816a93d2014-06-30 12:34:37 +0200889static int conn_latency_set(void *data, u64 val)
890{
891 struct hci_dev *hdev = data;
892
893 if (val > 0x01f3)
894 return -EINVAL;
895
896 hci_dev_lock(hdev);
897 hdev->le_conn_latency = val;
898 hci_dev_unlock(hdev);
899
900 return 0;
901}
902
903static int conn_latency_get(void *data, u64 *val)
904{
905 struct hci_dev *hdev = data;
906
907 hci_dev_lock(hdev);
908 *val = hdev->le_conn_latency;
909 hci_dev_unlock(hdev);
910
911 return 0;
912}
913
914DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915 conn_latency_set, "%llu\n");
916
Marcel Holtmannf1649572014-06-30 12:34:38 +0200917static int supervision_timeout_set(void *data, u64 val)
918{
919 struct hci_dev *hdev = data;
920
921 if (val < 0x000a || val > 0x0c80)
922 return -EINVAL;
923
924 hci_dev_lock(hdev);
925 hdev->le_supv_timeout = val;
926 hci_dev_unlock(hdev);
927
928 return 0;
929}
930
931static int supervision_timeout_get(void *data, u64 *val)
932{
933 struct hci_dev *hdev = data;
934
935 hci_dev_lock(hdev);
936 *val = hdev->le_supv_timeout;
937 hci_dev_unlock(hdev);
938
939 return 0;
940}
941
942DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943 supervision_timeout_set, "%llu\n");
944
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800945static int adv_channel_map_set(void *data, u64 val)
946{
947 struct hci_dev *hdev = data;
948
949 if (val < 0x01 || val > 0x07)
950 return -EINVAL;
951
952 hci_dev_lock(hdev);
953 hdev->le_adv_channel_map = val;
954 hci_dev_unlock(hdev);
955
956 return 0;
957}
958
959static int adv_channel_map_get(void *data, u64 *val)
960{
961 struct hci_dev *hdev = data;
962
963 hci_dev_lock(hdev);
964 *val = hdev->le_adv_channel_map;
965 hci_dev_unlock(hdev);
966
967 return 0;
968}
969
970DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971 adv_channel_map_set, "%llu\n");
972
Georg Lukas729a1052014-07-26 13:59:58 +0200973static int adv_min_interval_set(void *data, u64 val)
Jukka Rissanen89863102013-12-11 17:05:38 +0200974{
Georg Lukas729a1052014-07-26 13:59:58 +0200975 struct hci_dev *hdev = data;
Jukka Rissanen89863102013-12-11 17:05:38 +0200976
Georg Lukas729a1052014-07-26 13:59:58 +0200977 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
Jukka Rissanen89863102013-12-11 17:05:38 +0200978 return -EINVAL;
979
Andre Guedes7d474e02014-02-26 20:21:54 -0300980 hci_dev_lock(hdev);
Georg Lukas729a1052014-07-26 13:59:58 +0200981 hdev->le_adv_min_interval = val;
Andre Guedes7d474e02014-02-26 20:21:54 -0300982 hci_dev_unlock(hdev);
983
984 return 0;
985}
986
Georg Lukas729a1052014-07-26 13:59:58 +0200987static int adv_min_interval_get(void *data, u64 *val)
Andre Guedes7d474e02014-02-26 20:21:54 -0300988{
Georg Lukas729a1052014-07-26 13:59:58 +0200989 struct hci_dev *hdev = data;
990
991 hci_dev_lock(hdev);
992 *val = hdev->le_adv_min_interval;
993 hci_dev_unlock(hdev);
994
995 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -0300996}
997
Georg Lukas729a1052014-07-26 13:59:58 +0200998DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
999 adv_min_interval_set, "%llu\n");
1000
1001static int adv_max_interval_set(void *data, u64 val)
Andre Guedes7d474e02014-02-26 20:21:54 -03001002{
Georg Lukas729a1052014-07-26 13:59:58 +02001003 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -03001004
Georg Lukas729a1052014-07-26 13:59:58 +02001005 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
Andre Guedes7d474e02014-02-26 20:21:54 -03001006 return -EINVAL;
1007
Georg Lukas729a1052014-07-26 13:59:58 +02001008 hci_dev_lock(hdev);
1009 hdev->le_adv_max_interval = val;
1010 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001011
Georg Lukas729a1052014-07-26 13:59:58 +02001012 return 0;
1013}
Andre Guedes7d474e02014-02-26 20:21:54 -03001014
Georg Lukas729a1052014-07-26 13:59:58 +02001015static int adv_max_interval_get(void *data, u64 *val)
1016{
1017 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -03001018
Georg Lukas729a1052014-07-26 13:59:58 +02001019 hci_dev_lock(hdev);
1020 *val = hdev->le_adv_max_interval;
1021 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001022
Georg Lukas729a1052014-07-26 13:59:58 +02001023 return 0;
1024}
Andre Guedes7d474e02014-02-26 20:21:54 -03001025
Georg Lukas729a1052014-07-26 13:59:58 +02001026DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1027 adv_max_interval_set, "%llu\n");
Andre Guedes7d474e02014-02-26 20:21:54 -03001028
/* seq_file show callback for the debugfs "device_list" entry.
 *
 * Dumps every entry on hdev->le_conn_params as one line of
 * "<bdaddr> <addr_type> <auto_connect>". The list walk happens under
 * hci_dev_lock so the list cannot change while it is printed.
 */
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}
1043
/* debugfs open callback: bind device_list_show() via seq_file's
 * single_open(); inode->i_private carries the struct hci_dev pointer.
 */
static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}
1048
/* Read-only file operations for the "device_list" debugfs entry,
 * using the standard seq_file single_open()/single_release() pairing.
 */
static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1055
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056/* ---- HCI requests ---- */
1057
Johan Hedberg42c6b122013-03-05 20:37:49 +02001058static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001060 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001061
1062 if (hdev->req_status == HCI_REQ_PEND) {
1063 hdev->req_result = result;
1064 hdev->req_status = HCI_REQ_DONE;
1065 wake_up_interruptible(&hdev->req_wait_q);
1066 }
1067}
1068
1069static void hci_req_cancel(struct hci_dev *hdev, int err)
1070{
1071 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1072
1073 if (hdev->req_status == HCI_REQ_PEND) {
1074 hdev->req_result = err;
1075 hdev->req_status = HCI_REQ_CANCELED;
1076 wake_up_interruptible(&hdev->req_wait_q);
1077 }
1078}
1079
/* Detach and validate the last received HCI event (hdev->recv_evt).
 *
 * Ownership of the skb transfers to the caller on success; on any
 * failure the skb is freed here and ERR_PTR(-ENODATA) is returned.
 * If @event is non-zero the skb is returned as soon as its event code
 * matches; otherwise a Command Complete event for @opcode is required.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Take the stored event out from under the lock so no other
	 * context can see or free it after this point.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event code: return the skb with
	 * only the event header stripped.
	 */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	/* Length re-checked after the pull above */
	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1134
/* Send a single HCI command and sleep until it completes, is canceled,
 * or @timeout (jiffies) expires. Returns the completion skb from
 * hci_get_cmd_complete() on success, or an ERR_PTR on failure.
 *
 * NOTE(review): the wait is open-coded; the waiter must be queued
 * before checking req_status so the sequence below must not be
 * reordered.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Must be set before hci_req_run() so the completion callback
	 * sees a pending request.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	/* Map the completion state set by hci_req_sync_complete() /
	 * hci_req_cancel() to an errno.
	 */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1188
/* Convenience wrapper for __hci_cmd_sync_ev() when the command is
 * expected to finish with a plain Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1195
/* Execute request and wait for completion.
 *
 * @func builds the request (it may queue zero or more HCI commands);
 * the calling task then sleeps on hdev->req_wait_q until
 * hci_req_sync_complete() / hci_req_cancel() fires or @timeout
 * (jiffies) expires. Returns 0 on success or a negative errno.
 * Caller must hold the request lock (see hci_req_sync()).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Must be set before running the request so the completion
	 * callback recognises it as pending.
	 */
	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	/* Translate the state left by the completion/cancel callbacks */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1259
/* Public synchronous-request entry point: refuses to run unless the
 * device is up, and takes the request lock so concurrent synchronous
 * requests are serialized before delegating to __hci_req_sync().
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
1277
/* Request builder that queues an HCI_OP_RESET command; the HCI_RESET
 * flag is set first so other code can see a reset is in flight.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
1286
/* Stage-one init for BR/EDR controllers: select packet-based flow
 * control and queue the basic identity commands.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1300
/* Stage-one init for AMP controllers: select block-based flow control
 * and queue the AMP discovery/configuration commands.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1326
Johan Hedberg42c6b122013-03-05 20:37:49 +02001327static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001328{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001329 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001330
1331 BT_DBG("%s %ld", hdev->name, opt);
1332
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001333 /* Reset */
1334 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001335 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001336
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001337 switch (hdev->dev_type) {
1338 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001339 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001340 break;
1341
1342 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001343 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001344 break;
1345
1346 default:
1347 BT_ERR("Unknown device type %d", hdev->dev_type);
1348 break;
1349 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001350}
1351
/* Stage-two BR/EDR setup: query basic controller parameters, clear
 * event filters and set the connection accept timeout. The command
 * order is kept as-is; responses are consumed by the event handlers.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1393
/* Stage-two LE setup: query LE capabilities and start from an empty
 * white list. LE-only controllers get HCI_LE_ENABLED set implicitly.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1417
1418static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1419{
1420 if (lmp_ext_inq_capable(hdev))
1421 return 0x02;
1422
1423 if (lmp_inq_rssi_capable(hdev))
1424 return 0x01;
1425
1426 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1427 hdev->lmp_subver == 0x0757)
1428 return 0x01;
1429
1430 if (hdev->manufacturer == 15) {
1431 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1432 return 0x01;
1433 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1434 return 0x01;
1435 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1436 return 0x01;
1437 }
1438
1439 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1440 hdev->lmp_subver == 0x1805)
1441 return 0x01;
1442
1443 return 0x00;
1444}
1445
Johan Hedberg42c6b122013-03-05 20:37:49 +02001446static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001447{
1448 u8 mode;
1449
Johan Hedberg42c6b122013-03-05 20:37:49 +02001450 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001451
Johan Hedberg42c6b122013-03-05 20:37:49 +02001452 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001453}
1454
/* Build and queue the HCI Set Event Mask command. The mask starts from
 * a BR/EDR-friendly default and is then adjusted per capability; for
 * LE-only controllers a minimal mask is built from scratch instead.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1531
/* Second init stage: run the transport-specific setup helpers, then
 * queue commands that depend on capabilities learned in stage one
 * (SSP, inquiry mode, extended features, link security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any cached EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1593
Johan Hedberg42c6b122013-03-05 20:37:49 +02001594static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001595{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001596 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001597 struct hci_cp_write_def_link_policy cp;
1598 u16 link_policy = 0;
1599
1600 if (lmp_rswitch_capable(hdev))
1601 link_policy |= HCI_LP_RSWITCH;
1602 if (lmp_hold_capable(hdev))
1603 link_policy |= HCI_LP_HOLD;
1604 if (lmp_sniff_capable(hdev))
1605 link_policy |= HCI_LP_SNIFF;
1606 if (lmp_park_capable(hdev))
1607 link_policy |= HCI_LP_PARK;
1608
1609 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001610 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001611}
1612
/* Queue a Write LE Host Supported command when the host-side LE
 * setting differs from what the controller currently reports. Skipped
 * entirely on LE-only controllers.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command if it would change the controller state */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1633
/* Build and queue the Set Event Mask Page 2 command, enabling the CSB
 * master/slave and Authenticated Payload Timeout events as supported.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1665
/* Third init stage: set the event mask, handle stored-link-key and
 * link-policy setup, configure the LE event mask and read extended
 * feature pages beyond page 1.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1736
/* Fourth and final stage of controller initialization.
 *
 * Issues optional HCI commands whose availability depends on the
 * "Supported Commands" bitmask (hdev->commands[]) and the LMP feature
 * bits read during the earlier init stages. Each command is only
 * queued when the controller explicitly claims support for it.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured. The
	 * HCI_FORCE_SC debug flag allows enabling it even when the LMP
	 * feature bit is not set.
	 */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1766
/* Run the full synchronous initialization sequence for a controller.
 *
 * Executes the staged init requests (hci_init1_req .. hci_init4_req)
 * in order; stages 2-4 are only run for BR/EDR-type controllers. On
 * the very first power-on (HCI_SETUP set) the debugfs entries for the
 * device are created as well.
 *
 * Returns 0 on success or a negative error from __hci_req_sync().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to all controller types */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
			    &whitelist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing related entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode timing entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE-only entries; SMP is registered here as well */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}
1907
/* Minimal init request used for unconfigured controllers: reset the
 * controller (unless the driver opted out via quirk), read the local
 * version information, and - only when the driver provides a
 * set_bdaddr callback - read the BD address.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1925
1926static int __hci_unconf_init(struct hci_dev *hdev)
1927{
1928 int err;
1929
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001930 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1931 return 0;
1932
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001933 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1934 if (err < 0)
1935 return err;
1936
1937 return 0;
1938}
1939
Johan Hedberg42c6b122013-03-05 20:37:49 +02001940static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941{
1942 __u8 scan = opt;
1943
Johan Hedberg42c6b122013-03-05 20:37:49 +02001944 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
1946 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001947 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948}
1949
Johan Hedberg42c6b122013-03-05 20:37:49 +02001950static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951{
1952 __u8 auth = opt;
1953
Johan Hedberg42c6b122013-03-05 20:37:49 +02001954 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
1956 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001957 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958}
1959
Johan Hedberg42c6b122013-03-05 20:37:49 +02001960static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961{
1962 __u8 encrypt = opt;
1963
Johan Hedberg42c6b122013-03-05 20:37:49 +02001964 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001966 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001967 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968}
1969
Johan Hedberg42c6b122013-03-05 20:37:49 +02001970static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001971{
1972 __le16 policy = cpu_to_le16(opt);
1973
Johan Hedberg42c6b122013-03-05 20:37:49 +02001974 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001975
1976 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001977 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001978}
1979
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001980/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 * Device is held on return. */
1982struct hci_dev *hci_dev_get(int index)
1983{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001984 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
1986 BT_DBG("%d", index);
1987
1988 if (index < 0)
1989 return NULL;
1990
1991 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001992 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 if (d->id == index) {
1994 hdev = hci_dev_hold(d);
1995 break;
1996 }
1997 }
1998 read_unlock(&hci_dev_list_lock);
1999 return hdev;
2000}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
2002/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02002003
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002004bool hci_discovery_active(struct hci_dev *hdev)
2005{
2006 struct discovery_state *discov = &hdev->discovery;
2007
Andre Guedes6fbe1952012-02-03 17:47:58 -03002008 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03002009 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03002010 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002011 return true;
2012
Andre Guedes6fbe1952012-02-03 17:47:58 -03002013 default:
2014 return false;
2015 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002016}
2017
/* Transition the discovery state machine to @state and emit the
 * corresponding mgmt "discovering" events. A transition to the same
 * state is a no-op.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* Only report "discovery stopped" if discovery actually
		 * got past the starting phase.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
2047
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002048void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049{
Johan Hedberg30883512012-01-04 14:16:21 +02002050 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002051 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052
Johan Hedberg561aafb2012-01-04 13:31:59 +02002053 list_for_each_entry_safe(p, n, &cache->all, all) {
2054 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002055 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002057
2058 INIT_LIST_HEAD(&cache->unknown);
2059 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060}
2061
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002062struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2063 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064{
Johan Hedberg30883512012-01-04 14:16:21 +02002065 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 struct inquiry_entry *e;
2067
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002068 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069
Johan Hedberg561aafb2012-01-04 13:31:59 +02002070 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002072 return e;
2073 }
2074
2075 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076}
2077
Johan Hedberg561aafb2012-01-04 13:31:59 +02002078struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002079 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002080{
Johan Hedberg30883512012-01-04 14:16:21 +02002081 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002082 struct inquiry_entry *e;
2083
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002084 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002085
2086 list_for_each_entry(e, &cache->unknown, list) {
2087 if (!bacmp(&e->data.bdaddr, bdaddr))
2088 return e;
2089 }
2090
2091 return NULL;
2092}
2093
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002094struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002095 bdaddr_t *bdaddr,
2096 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002097{
2098 struct discovery_state *cache = &hdev->discovery;
2099 struct inquiry_entry *e;
2100
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002101 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002102
2103 list_for_each_entry(e, &cache->resolve, list) {
2104 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2105 return e;
2106 if (!bacmp(&e->data.bdaddr, bdaddr))
2107 return e;
2108 }
2109
2110 return NULL;
2111}
2112
/* Re-insert @ie into the resolve list at its sorted position. The
 * list is kept ordered by ascending abs(RSSI) (strongest signal
 * first); entries whose name resolution is already pending keep
 * their place at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the entry is re-added below */
	list_del(&ie->list);

	/* Stop at the first non-pending entry with an equal or weaker
	 * signal; @pos tracks the element to insert after.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
2131
/* Add or refresh an inquiry cache entry for the device described by
 * @data.
 *
 * Returns MGMT_DEV_FOUND_* flags: LEGACY_PAIRING when neither the
 * new data nor the cached entry indicates SSP support, and
 * CONFIRM_NAME when the remote name is still unknown (also set when
 * entry allocation fails).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Fresh inquiry data invalidates stored OOB data for the device */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change re-sorts the entry within the
		 * resolve list while name resolution is still needed.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to NAME_KNOWN when the caller now
	 * knows the name, unless resolution is already pending.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2193
2194static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2195{
Johan Hedberg30883512012-01-04 14:16:21 +02002196 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 struct inquiry_info *info = (struct inquiry_info *) buf;
2198 struct inquiry_entry *e;
2199 int copied = 0;
2200
Johan Hedberg561aafb2012-01-04 13:31:59 +02002201 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002203
2204 if (copied >= num)
2205 break;
2206
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 bacpy(&info->bdaddr, &data->bdaddr);
2208 info->pscan_rep_mode = data->pscan_rep_mode;
2209 info->pscan_period_mode = data->pscan_period_mode;
2210 info->pscan_mode = data->pscan_mode;
2211 memcpy(info->dev_class, data->dev_class, 3);
2212 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002213
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002215 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 }
2217
2218 BT_DBG("cache %p, copied %d", cache, copied);
2219 return copied;
2220}
2221
Johan Hedberg42c6b122013-03-05 20:37:49 +02002222static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223{
2224 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002225 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 struct hci_cp_inquiry cp;
2227
2228 BT_DBG("%s", hdev->name);
2229
2230 if (test_bit(HCI_INQUIRY, &hdev->flags))
2231 return;
2232
2233 /* Start Inquiry */
2234 memcpy(&cp.lap, &ir->lap, 3);
2235 cp.length = ir->length;
2236 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002237 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238}
2239
2240int hci_inquiry(void __user *arg)
2241{
2242 __u8 __user *ptr = arg;
2243 struct hci_inquiry_req ir;
2244 struct hci_dev *hdev;
2245 int err = 0, do_inquiry = 0, max_rsp;
2246 long timeo;
2247 __u8 *buf;
2248
2249 if (copy_from_user(&ir, ptr, sizeof(ir)))
2250 return -EFAULT;
2251
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002252 hdev = hci_dev_get(ir.dev_id);
2253 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 return -ENODEV;
2255
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002256 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2257 err = -EBUSY;
2258 goto done;
2259 }
2260
Marcel Holtmann4a964402014-07-02 19:10:33 +02002261 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002262 err = -EOPNOTSUPP;
2263 goto done;
2264 }
2265
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002266 if (hdev->dev_type != HCI_BREDR) {
2267 err = -EOPNOTSUPP;
2268 goto done;
2269 }
2270
Johan Hedberg56f87902013-10-02 13:43:13 +03002271 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2272 err = -EOPNOTSUPP;
2273 goto done;
2274 }
2275
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002276 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002277 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002278 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002279 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 do_inquiry = 1;
2281 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002282 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283
Marcel Holtmann04837f62006-07-03 10:02:33 +02002284 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002285
2286 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002287 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2288 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002289 if (err < 0)
2290 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002291
2292 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2293 * cleared). If it is interrupted by a signal, return -EINTR.
2294 */
NeilBrown74316202014-07-07 15:16:04 +10002295 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002296 TASK_INTERRUPTIBLE))
2297 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002298 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002300 /* for unlimited number of responses we will use buffer with
2301 * 255 entries
2302 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2304
2305 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2306 * copy it to the user space.
2307 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002308 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002309 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 err = -ENOMEM;
2311 goto done;
2312 }
2313
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002314 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002316 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
2318 BT_DBG("num_rsp %d", ir.num_rsp);
2319
2320 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2321 ptr += sizeof(ir);
2322 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002323 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002325 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 err = -EFAULT;
2327
2328 kfree(buf);
2329
2330done:
2331 hci_dev_put(hdev);
2332 return err;
2333}
2334
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002335static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337 int ret = 0;
2338
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 BT_DBG("%s %p", hdev->name, hdev);
2340
2341 hci_req_lock(hdev);
2342
Johan Hovold94324962012-03-15 14:48:41 +01002343 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2344 ret = -ENODEV;
2345 goto done;
2346 }
2347
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002348 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2349 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002350 /* Check for rfkill but allow the HCI setup stage to
2351 * proceed (which in itself doesn't cause any RF activity).
2352 */
2353 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2354 ret = -ERFKILL;
2355 goto done;
2356 }
2357
2358 /* Check for valid public address or a configured static
	 * random address, but let the HCI setup proceed to
2360 * be able to determine if there is a public address
2361 * or not.
2362 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002363 * In case of user channel usage, it is not important
2364 * if a public address or static random address is
2365 * available.
2366 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002367 * This check is only valid for BR/EDR controllers
2368 * since AMP controllers do not have an address.
2369 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002370 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2371 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002372 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2373 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2374 ret = -EADDRNOTAVAIL;
2375 goto done;
2376 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002377 }
2378
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 if (test_bit(HCI_UP, &hdev->flags)) {
2380 ret = -EALREADY;
2381 goto done;
2382 }
2383
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384 if (hdev->open(hdev)) {
2385 ret = -EIO;
2386 goto done;
2387 }
2388
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002389 atomic_set(&hdev->cmd_cnt, 1);
2390 set_bit(HCI_INIT, &hdev->flags);
2391
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002392 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2393 if (hdev->setup)
2394 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002395
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002396 /* The transport driver can set these quirks before
2397 * creating the HCI device or in its setup callback.
2398 *
2399 * In case any of them is set, the controller has to
2400 * start up as unconfigured.
2401 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002402 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2403 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002404 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002405
2406 /* For an unconfigured controller it is required to
2407 * read at least the version information provided by
2408 * the Read Local Version Information command.
2409 *
2410 * If the set_bdaddr driver callback is provided, then
2411 * also the original Bluetooth public device address
2412 * will be read using the Read BD Address command.
2413 */
2414 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2415 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002416 }
2417
Marcel Holtmann9713c172014-07-06 12:11:15 +02002418 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2419 /* If public address change is configured, ensure that
2420 * the address gets programmed. If the driver does not
2421 * support changing the public address, fail the power
2422 * on procedure.
2423 */
2424 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2425 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002426 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2427 else
2428 ret = -EADDRNOTAVAIL;
2429 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002430
2431 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002432 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002433 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002434 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 }
2436
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002437 clear_bit(HCI_INIT, &hdev->flags);
2438
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 if (!ret) {
2440 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002441 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 set_bit(HCI_UP, &hdev->flags);
2443 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002444 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002445 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002446 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002447 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002448 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002449 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002450 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002451 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002452 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002453 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002455 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002456 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002457 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458
2459 skb_queue_purge(&hdev->cmd_q);
2460 skb_queue_purge(&hdev->rx_q);
2461
2462 if (hdev->flush)
2463 hdev->flush(hdev);
2464
2465 if (hdev->sent_cmd) {
2466 kfree_skb(hdev->sent_cmd);
2467 hdev->sent_cmd = NULL;
2468 }
2469
2470 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002471 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 }
2473
2474done:
2475 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 return ret;
2477}
2478
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002479/* ---- HCI ioctl helpers ---- */
2480
/* Ioctl helper: power on an HCI device.
 *
 * Returns 0 on success, -ENODEV for an unknown device index,
 * -EOPNOTSUPP when an unconfigured controller is opened outside of
 * user channel operation, or the error from hci_dev_do_open().
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2535
Johan Hedbergd7347f32014-07-04 12:37:23 +03002536/* This function requires the caller holds hdev->lock */
2537static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2538{
2539 struct hci_conn_params *p;
2540
Johan Hedbergf161dd42014-08-15 21:06:54 +03002541 list_for_each_entry(p, &hdev->le_conn_params, list) {
2542 if (p->conn) {
2543 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002544 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002545 p->conn = NULL;
2546 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002547 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002548 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002549
2550 BT_DBG("All LE pending actions cleared");
2551}
2552
/* Shut down a powered-on HCI device: flush pending work, drop all
 * connections and queued packets, optionally reset the controller and
 * finally call the driver's close callback.
 *
 * Always returns 0; closing an already-down device is a no-op.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* A pending deferred power off is superseded by this close. */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Device already down; just stop the command timer. */
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and clear the
	 * corresponding flags.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* The RPA expiry work is only scheduled when mgmt is in use. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Flush caches and tear down all connections under hdev->lock. */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop last received event; kfree_skb(NULL) is a no-op. */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2655
2656int hci_dev_close(__u16 dev)
2657{
2658 struct hci_dev *hdev;
2659 int err;
2660
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002661 hdev = hci_dev_get(dev);
2662 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002664
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002665 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2666 err = -EBUSY;
2667 goto done;
2668 }
2669
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002670 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2671 cancel_delayed_work(&hdev->power_off);
2672
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002674
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002675done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676 hci_dev_put(hdev);
2677 return err;
2678}
2679
/* Ioctl helper: reset a running HCI device.
 *
 * Drops all queued packets, flushes the inquiry cache and all
 * connections, restores the flow control counters and issues an HCI
 * Reset command.
 *
 * Returns 0 on success, -ENODEV for an unknown index, -ENETDOWN when
 * the device is not up, -EBUSY in user channel mode and -EOPNOTSUPP
 * for unconfigured controllers.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush discovery results and tear down all connections under
	 * hdev->lock.
	 */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the command and per-link-type packet counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2728
2729int hci_dev_reset_stat(__u16 dev)
2730{
2731 struct hci_dev *hdev;
2732 int ret = 0;
2733
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002734 hdev = hci_dev_get(dev);
2735 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 return -ENODEV;
2737
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002738 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2739 ret = -EBUSY;
2740 goto done;
2741 }
2742
Marcel Holtmann4a964402014-07-02 19:10:33 +02002743 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002744 ret = -EOPNOTSUPP;
2745 goto done;
2746 }
2747
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2749
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002750done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 return ret;
2753}
2754
Johan Hedberg123abc02014-07-10 12:09:07 +03002755static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2756{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002757 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002758
2759 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2760
2761 if ((scan & SCAN_PAGE))
2762 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2763 &hdev->dev_flags);
2764 else
2765 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2766 &hdev->dev_flags);
2767
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002768 if ((scan & SCAN_INQUIRY)) {
2769 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2770 &hdev->dev_flags);
2771 } else {
2772 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2773 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2774 &hdev->dev_flags);
2775 }
2776
Johan Hedberg123abc02014-07-10 12:09:07 +03002777 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2778 return;
2779
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002780 if (conn_changed || discov_changed) {
2781 /* In case this was disabled through mgmt */
2782 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2783
2784 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2785 mgmt_update_adv_data(hdev);
2786
Johan Hedberg123abc02014-07-10 12:09:07 +03002787 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002788 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002789}
2790
/* Ioctl helper: handle the HCISET* device control commands.
 *
 * Copies a struct hci_dev_req from userspace and applies the requested
 * setting, either directly on the hci_dev fields or through a
 * synchronous HCI request.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV, -EBUSY,
 * -EOPNOTSUPP or -EINVAL for an unknown command).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* These legacy commands are refused while the device is bound
	 * to a user channel or still unconfigured.
	 */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Only primary BR/EDR controllers with BR/EDR enabled can be
	 * configured through these commands.
	 */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU commands dev_opt packs two 16-bit values: the
	 * __u16 at offset 1 is the MTU, the one at offset 0 the packet
	 * count (host byte order).
	 */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2892
/* Ioctl helper: copy the list of registered HCI devices to userspace.
 *
 * Userspace supplies the entry capacity in the leading __u16; at most
 * that many (dev_id, flags) pairs are returned together with the
 * actual count.
 *
 * Returns 0 on success, -EFAULT on failed user copies, -EINVAL for a
 * zero or oversized capacity and -ENOMEM on allocation failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries that were actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2942
2943int hci_get_dev_info(void __user *arg)
2944{
2945 struct hci_dev *hdev;
2946 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002947 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 int err = 0;
2949
2950 if (copy_from_user(&di, arg, sizeof(di)))
2951 return -EFAULT;
2952
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002953 hdev = hci_dev_get(di.dev_id);
2954 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 return -ENODEV;
2956
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002957 /* When the auto-off is configured it means the transport
2958 * is running, but in that case still indicate that the
2959 * device is actually down.
2960 */
2961 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2962 flags = hdev->flags & ~BIT(HCI_UP);
2963 else
2964 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002965
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 strcpy(di.name, hdev->name);
2967 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002968 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002969 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002971 if (lmp_bredr_capable(hdev)) {
2972 di.acl_mtu = hdev->acl_mtu;
2973 di.acl_pkts = hdev->acl_pkts;
2974 di.sco_mtu = hdev->sco_mtu;
2975 di.sco_pkts = hdev->sco_pkts;
2976 } else {
2977 di.acl_mtu = hdev->le_mtu;
2978 di.acl_pkts = hdev->le_pkts;
2979 di.sco_mtu = 0;
2980 di.sco_pkts = 0;
2981 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 di.link_policy = hdev->link_policy;
2983 di.link_mode = hdev->link_mode;
2984
2985 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2986 memcpy(&di.features, &hdev->features, sizeof(di.features));
2987
2988 if (copy_to_user(arg, &di, sizeof(di)))
2989 err = -EFAULT;
2990
2991 hci_dev_put(hdev);
2992
2993 return err;
2994}
2995
2996/* ---- Interface to HCI drivers ---- */
2997
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002998static int hci_rfkill_set_block(void *data, bool blocked)
2999{
3000 struct hci_dev *hdev = data;
3001
3002 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3003
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003004 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3005 return -EBUSY;
3006
Johan Hedberg5e130362013-09-13 08:58:17 +03003007 if (blocked) {
3008 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003009 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3010 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003011 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003012 } else {
3013 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003014 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003015
3016 return 0;
3017}
3018
/* rfkill callbacks for blocking/unblocking the controller radio */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3022
/* Work callback for hdev->power_on: bring the device up and perform
 * the post-setup checks and mgmt notifications.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3081
3082static void hci_power_off(struct work_struct *work)
3083{
Johan Hedberg32435532011-11-07 22:16:04 +02003084 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003085 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003086
3087 BT_DBG("%s", hdev->name);
3088
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003089 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003090}
3091
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003092static void hci_discov_off(struct work_struct *work)
3093{
3094 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003095
3096 hdev = container_of(work, struct hci_dev, discov_off.work);
3097
3098 BT_DBG("%s", hdev->name);
3099
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003100 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003101}
3102
Johan Hedberg35f74982014-02-18 17:14:32 +02003103void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003104{
Johan Hedberg48210022013-01-27 00:31:28 +02003105 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003106
Johan Hedberg48210022013-01-27 00:31:28 +02003107 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3108 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003109 kfree(uuid);
3110 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003111}
3112
Johan Hedberg35f74982014-02-18 17:14:32 +02003113void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003114{
3115 struct list_head *p, *n;
3116
3117 list_for_each_safe(p, n, &hdev->link_keys) {
3118 struct link_key *key;
3119
3120 key = list_entry(p, struct link_key, list);
3121
3122 list_del(p);
3123 kfree(key);
3124 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003125}
3126
Johan Hedberg35f74982014-02-18 17:14:32 +02003127void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003128{
3129 struct smp_ltk *k, *tmp;
3130
3131 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3132 list_del(&k->list);
3133 kfree(k);
3134 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003135}
3136
Johan Hedberg970c4e42014-02-18 10:19:33 +02003137void hci_smp_irks_clear(struct hci_dev *hdev)
3138{
3139 struct smp_irk *k, *tmp;
3140
3141 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3142 list_del(&k->list);
3143 kfree(k);
3144 }
3145}
3146
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003147struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3148{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003149 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003150
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003151 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003152 if (bacmp(bdaddr, &k->bdaddr) == 0)
3153 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003154
3155 return NULL;
3156}
3157
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303158static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003159 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003160{
3161 /* Legacy key */
3162 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303163 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003164
3165 /* Debug keys are insecure so don't store them persistently */
3166 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303167 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003168
3169 /* Changed combination key and there's no previous one */
3170 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303171 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003172
3173 /* Security mode 3 case */
3174 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303175 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003176
3177 /* Neither local nor remote side had no-bonding as requirement */
3178 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303179 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003180
3181 /* Local side had dedicated bonding as requirement */
3182 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303183 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003184
3185 /* Remote side had dedicated bonding as requirement */
3186 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303187 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003188
3189 /* If none of the above criteria match, then don't store the key
3190 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303191 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003192}
3193
Johan Hedberge804d252014-07-16 11:42:28 +03003194static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003195{
Johan Hedberge804d252014-07-16 11:42:28 +03003196 if (type == SMP_LTK)
3197 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003198
Johan Hedberge804d252014-07-16 11:42:28 +03003199 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003200}
3201
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003202struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003203 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003204{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003205 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003206
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003207 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003208 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003209 continue;
3210
Johan Hedberge804d252014-07-16 11:42:28 +03003211 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003212 continue;
3213
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003214 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003215 }
3216
3217 return NULL;
3218}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003219
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003220struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003221 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003222{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003223 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003224
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003225 list_for_each_entry(k, &hdev->long_term_keys, list)
3226 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003227 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberge804d252014-07-16 11:42:28 +03003228 ltk_role(k->type) == role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003229 return k;
3230
3231 return NULL;
3232}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003233
Johan Hedberg970c4e42014-02-18 10:19:33 +02003234struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3235{
3236 struct smp_irk *irk;
3237
3238 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3239 if (!bacmp(&irk->rpa, rpa))
3240 return irk;
3241 }
3242
3243 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003244 if (smp_irk_matches(hdev, irk->val, rpa)) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003245 bacpy(&irk->rpa, rpa);
3246 return irk;
3247 }
3248 }
3249
3250 return NULL;
3251}
3252
3253struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3254 u8 addr_type)
3255{
3256 struct smp_irk *irk;
3257
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003258 /* Identity Address must be public or static random */
3259 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3260 return NULL;
3261
Johan Hedberg970c4e42014-02-18 10:19:33 +02003262 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3263 if (addr_type == irk->addr_type &&
3264 bacmp(bdaddr, &irk->bdaddr) == 0)
3265 return irk;
3266 }
3267
3268 return NULL;
3269}
3270
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003271struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003272 bdaddr_t *bdaddr, u8 *val, u8 type,
3273 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003274{
3275 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303276 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003277
3278 old_key = hci_find_link_key(hdev, bdaddr);
3279 if (old_key) {
3280 old_key_type = old_key->type;
3281 key = old_key;
3282 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003283 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003284 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003285 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003286 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003287 list_add(&key->list, &hdev->link_keys);
3288 }
3289
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003290 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003291
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003292 /* Some buggy controller combinations generate a changed
3293 * combination key for legacy pairing even when there's no
3294 * previous key */
3295 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003296 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003297 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003298 if (conn)
3299 conn->key_type = type;
3300 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003301
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003302 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003303 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003304 key->pin_len = pin_len;
3305
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003306 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003307 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003308 else
3309 key->type = type;
3310
Johan Hedberg7652ff62014-06-24 13:15:49 +03003311 if (persistent)
3312 *persistent = hci_persistent_key(hdev, conn, type,
3313 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003314
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003315 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003316}
3317
Johan Hedbergca9142b2014-02-19 14:57:44 +02003318struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003319 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003320 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003321{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003322 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003323 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003324
Johan Hedberge804d252014-07-16 11:42:28 +03003325 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003326 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003327 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003328 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003329 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003330 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003331 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003332 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003333 }
3334
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003335 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003336 key->bdaddr_type = addr_type;
3337 memcpy(key->val, tk, sizeof(key->val));
3338 key->authenticated = authenticated;
3339 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003340 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003341 key->enc_size = enc_size;
3342 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003343
Johan Hedbergca9142b2014-02-19 14:57:44 +02003344 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003345}
3346
Johan Hedbergca9142b2014-02-19 14:57:44 +02003347struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3348 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003349{
3350 struct smp_irk *irk;
3351
3352 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3353 if (!irk) {
3354 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3355 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003356 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003357
3358 bacpy(&irk->bdaddr, bdaddr);
3359 irk->addr_type = addr_type;
3360
3361 list_add(&irk->list, &hdev->identity_resolving_keys);
3362 }
3363
3364 memcpy(irk->val, val, 16);
3365 bacpy(&irk->rpa, rpa);
3366
Johan Hedbergca9142b2014-02-19 14:57:44 +02003367 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003368}
3369
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003370int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3371{
3372 struct link_key *key;
3373
3374 key = hci_find_link_key(hdev, bdaddr);
3375 if (!key)
3376 return -ENOENT;
3377
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003378 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003379
3380 list_del(&key->list);
3381 kfree(key);
3382
3383 return 0;
3384}
3385
Johan Hedberge0b2b272014-02-18 17:14:31 +02003386int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003387{
3388 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003389 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003390
3391 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003392 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003393 continue;
3394
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003395 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003396
3397 list_del(&k->list);
3398 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003399 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003400 }
3401
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003402 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003403}
3404
Johan Hedberga7ec7332014-02-18 17:14:35 +02003405void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3406{
3407 struct smp_irk *k, *tmp;
3408
Johan Hedberg668b7b12014-02-21 16:03:31 +02003409 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003410 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3411 continue;
3412
3413 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3414
3415 list_del(&k->list);
3416 kfree(k);
3417 }
3418}
3419
Ville Tervo6bd32322011-02-16 16:32:41 +02003420/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003421static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003422{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003423 struct hci_dev *hdev = container_of(work, struct hci_dev,
3424 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003425
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003426 if (hdev->sent_cmd) {
3427 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3428 u16 opcode = __le16_to_cpu(sent->opcode);
3429
3430 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3431 } else {
3432 BT_ERR("%s command tx timeout", hdev->name);
3433 }
3434
Ville Tervo6bd32322011-02-16 16:32:41 +02003435 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003436 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003437}
3438
Szymon Janc2763eda2011-03-22 13:12:22 +01003439struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003440 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003441{
3442 struct oob_data *data;
3443
3444 list_for_each_entry(data, &hdev->remote_oob_data, list)
3445 if (bacmp(bdaddr, &data->bdaddr) == 0)
3446 return data;
3447
3448 return NULL;
3449}
3450
3451int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3452{
3453 struct oob_data *data;
3454
3455 data = hci_find_remote_oob_data(hdev, bdaddr);
3456 if (!data)
3457 return -ENOENT;
3458
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003459 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003460
3461 list_del(&data->list);
3462 kfree(data);
3463
3464 return 0;
3465}
3466
Johan Hedberg35f74982014-02-18 17:14:32 +02003467void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003468{
3469 struct oob_data *data, *n;
3470
3471 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3472 list_del(&data->list);
3473 kfree(data);
3474 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003475}
3476
Marcel Holtmann07988722014-01-10 02:07:29 -08003477int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3478 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003479{
3480 struct oob_data *data;
3481
3482 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003483 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003484 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003485 if (!data)
3486 return -ENOMEM;
3487
3488 bacpy(&data->bdaddr, bdaddr);
3489 list_add(&data->list, &hdev->remote_oob_data);
3490 }
3491
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003492 memcpy(data->hash192, hash, sizeof(data->hash192));
3493 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003494
Marcel Holtmann07988722014-01-10 02:07:29 -08003495 memset(data->hash256, 0, sizeof(data->hash256));
3496 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3497
3498 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3499
3500 return 0;
3501}
3502
3503int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3504 u8 *hash192, u8 *randomizer192,
3505 u8 *hash256, u8 *randomizer256)
3506{
3507 struct oob_data *data;
3508
3509 data = hci_find_remote_oob_data(hdev, bdaddr);
3510 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003511 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003512 if (!data)
3513 return -ENOMEM;
3514
3515 bacpy(&data->bdaddr, bdaddr);
3516 list_add(&data->list, &hdev->remote_oob_data);
3517 }
3518
3519 memcpy(data->hash192, hash192, sizeof(data->hash192));
3520 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3521
3522 memcpy(data->hash256, hash256, sizeof(data->hash256));
3523 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3524
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003525 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003526
3527 return 0;
3528}
3529
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003530struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003531 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003532{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003533 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003534
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003535 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003536 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003537 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003538 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003539
3540 return NULL;
3541}
3542
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003543void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003544{
3545 struct list_head *p, *n;
3546
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003547 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003548 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003549
3550 list_del(p);
3551 kfree(b);
3552 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003553}
3554
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003555int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003556{
3557 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003558
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003559 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003560 return -EBADF;
3561
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003562 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003563 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003564
Johan Hedberg27f70f32014-07-21 10:50:06 +03003565 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003566 if (!entry)
3567 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003568
3569 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003570 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003571
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003572 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003573
3574 return 0;
3575}
3576
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003577int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003578{
3579 struct bdaddr_list *entry;
3580
Johan Hedberg35f74982014-02-18 17:14:32 +02003581 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003582 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003583 return 0;
3584 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003585
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003586 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003587 if (!entry)
3588 return -ENOENT;
3589
3590 list_del(&entry->list);
3591 kfree(entry);
3592
3593 return 0;
3594}
3595
Andre Guedes15819a72014-02-03 13:56:18 -03003596/* This function requires the caller holds hdev->lock */
3597struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3598 bdaddr_t *addr, u8 addr_type)
3599{
3600 struct hci_conn_params *params;
3601
Johan Hedberg738f6182014-07-03 19:33:51 +03003602 /* The conn params list only contains identity addresses */
3603 if (!hci_is_identity_address(addr, addr_type))
3604 return NULL;
3605
Andre Guedes15819a72014-02-03 13:56:18 -03003606 list_for_each_entry(params, &hdev->le_conn_params, list) {
3607 if (bacmp(&params->addr, addr) == 0 &&
3608 params->addr_type == addr_type) {
3609 return params;
3610 }
3611 }
3612
3613 return NULL;
3614}
3615
Andre Guedescef952c2014-02-26 20:21:49 -03003616static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3617{
3618 struct hci_conn *conn;
3619
3620 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3621 if (!conn)
3622 return false;
3623
3624 if (conn->dst_type != type)
3625 return false;
3626
3627 if (conn->state != BT_CONNECTED)
3628 return false;
3629
3630 return true;
3631}
3632
Andre Guedes15819a72014-02-03 13:56:18 -03003633/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003634struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3635 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003636{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003637 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003638
Johan Hedberg738f6182014-07-03 19:33:51 +03003639 /* The list only contains identity addresses */
3640 if (!hci_is_identity_address(addr, addr_type))
3641 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003642
Johan Hedberg501f8822014-07-04 12:37:26 +03003643 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003644 if (bacmp(&param->addr, addr) == 0 &&
3645 param->addr_type == addr_type)
3646 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003647 }
3648
3649 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003650}
3651
/* This function requires the caller holds hdev->lock.
 *
 * Get or create the LE connection parameters entry for the identity
 * address @addr/@addr_type. A newly created entry is initialized from
 * the controller-wide defaults with auto-connect disabled and an empty
 * 'action' list head (i.e. not on any pending-action list).
 *
 * Returns the (possibly pre-existing) entry, or NULL if the address is
 * not a valid identity address or allocation fails.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	/* Reuse an existing entry if one is already stored */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	/* Seed the per-connection values from the hdev-wide defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
3687
/* This function requires the caller holds hdev->lock.
 *
 * Set the auto-connect policy for @addr/@addr_type, creating the conn
 * params entry if needed. Moving between policies re-homes the entry's
 * 'action' link: it is first detached from whichever pending list it
 * was on, then re-added to pend_le_reports or pend_le_conns depending
 * on the new policy, and the background scan is refreshed.
 *
 * Returns 0 on success or -EIO if the entry could not be created.
 */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from any pending-action list before re-homing below.
	 * list_del_init keeps 'action' valid for the list_add calls. */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if we aren't already
		 * connected to this device. */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
3728
Johan Hedbergf6c63242014-08-15 21:06:59 +03003729static void hci_conn_params_free(struct hci_conn_params *params)
3730{
3731 if (params->conn) {
3732 hci_conn_drop(params->conn);
3733 hci_conn_put(params->conn);
3734 }
3735
3736 list_del(&params->action);
3737 list_del(&params->list);
3738 kfree(params);
3739}
3740
Andre Guedes15819a72014-02-03 13:56:18 -03003741/* This function requires the caller holds hdev->lock */
3742void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3743{
3744 struct hci_conn_params *params;
3745
3746 params = hci_conn_params_lookup(hdev, addr, addr_type);
3747 if (!params)
3748 return;
3749
Johan Hedbergf6c63242014-08-15 21:06:59 +03003750 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003751
Johan Hedberg95305ba2014-07-04 12:37:21 +03003752 hci_update_background_scan(hdev);
3753
Andre Guedes15819a72014-02-03 13:56:18 -03003754 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3755}
3756
3757/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003758void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003759{
3760 struct hci_conn_params *params, *tmp;
3761
3762 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03003763 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3764 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003765 list_del(&params->list);
3766 kfree(params);
3767 }
3768
Johan Hedberg55af49a82014-07-02 17:37:26 +03003769 BT_DBG("All LE disabled connection parameters were removed");
3770}
3771
3772/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003773void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003774{
3775 struct hci_conn_params *params, *tmp;
3776
Johan Hedbergf6c63242014-08-15 21:06:59 +03003777 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3778 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003779
Johan Hedberga2f41a82014-07-04 12:37:19 +03003780 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003781
Andre Guedes15819a72014-02-03 13:56:18 -03003782 BT_DBG("All LE connection parameters were removed");
3783}
3784
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003785static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003786{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003787 if (status) {
3788 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003789
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003790 hci_dev_lock(hdev);
3791 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3792 hci_dev_unlock(hdev);
3793 return;
3794 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003795}
3796
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003797static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003798{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003799 /* General inquiry access code (GIAC) */
3800 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3801 struct hci_request req;
3802 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003803 int err;
3804
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003805 if (status) {
3806 BT_ERR("Failed to disable LE scanning: status %d", status);
3807 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003808 }
3809
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003810 switch (hdev->discovery.type) {
3811 case DISCOV_TYPE_LE:
3812 hci_dev_lock(hdev);
3813 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3814 hci_dev_unlock(hdev);
3815 break;
3816
3817 case DISCOV_TYPE_INTERLEAVED:
3818 hci_req_init(&req, hdev);
3819
3820 memset(&cp, 0, sizeof(cp));
3821 memcpy(&cp.lap, lap, sizeof(cp.lap));
3822 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3823 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3824
3825 hci_dev_lock(hdev);
3826
3827 hci_inquiry_cache_flush(hdev);
3828
3829 err = hci_req_run(&req, inquiry_complete);
3830 if (err) {
3831 BT_ERR("Inquiry request failed: err %d", err);
3832 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3833 }
3834
3835 hci_dev_unlock(hdev);
3836 break;
3837 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003838}
3839
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003840static void le_scan_disable_work(struct work_struct *work)
3841{
3842 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003843 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003844 struct hci_request req;
3845 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003846
3847 BT_DBG("%s", hdev->name);
3848
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003849 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003850
Andre Guedesb1efcc22014-02-26 20:21:40 -03003851 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003852
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003853 err = hci_req_run(&req, le_scan_disable_work_complete);
3854 if (err)
3855 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003856}
3857
Johan Hedberg8d972502014-02-28 12:54:14 +02003858static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3859{
3860 struct hci_dev *hdev = req->hdev;
3861
3862 /* If we're advertising or initiating an LE connection we can't
3863 * go ahead and change the random address at this time. This is
3864 * because the eventual initiator address used for the
3865 * subsequently created connection will be undefined (some
3866 * controllers use the new address and others the one we had
3867 * when the operation started).
3868 *
3869 * In this kind of scenario skip the update and let the random
3870 * address be updated at the next cycle.
3871 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003872 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003873 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3874 BT_DBG("Deferring random address update");
Johan Hedberg9a783a12014-09-12 09:31:52 -07003875 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Johan Hedberg8d972502014-02-28 12:54:14 +02003876 return;
3877 }
3878
3879 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3880}
3881
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003882int hci_update_random_address(struct hci_request *req, bool require_privacy,
3883 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003884{
3885 struct hci_dev *hdev = req->hdev;
3886 int err;
3887
3888 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003889 * current RPA has expired or there is something else than
3890 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003891 */
3892 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003893 int to;
3894
3895 *own_addr_type = ADDR_LE_DEV_RANDOM;
3896
3897 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003898 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003899 return 0;
3900
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003901 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003902 if (err < 0) {
3903 BT_ERR("%s failed to generate new RPA", hdev->name);
3904 return err;
3905 }
3906
Johan Hedberg8d972502014-02-28 12:54:14 +02003907 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003908
3909 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3910 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3911
3912 return 0;
3913 }
3914
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003915 /* In case of required privacy without resolvable private address,
3916 * use an unresolvable private address. This is useful for active
3917 * scanning and non-connectable advertising.
3918 */
3919 if (require_privacy) {
3920 bdaddr_t urpa;
3921
3922 get_random_bytes(&urpa, 6);
3923 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3924
3925 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003926 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003927 return 0;
3928 }
3929
Johan Hedbergebd3a742014-02-23 19:42:21 +02003930 /* If forcing static address is in use or there is no public
3931 * address use the static address as random address (but skip
3932 * the HCI command if the current random address is already the
3933 * static one.
3934 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003935 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003936 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3937 *own_addr_type = ADDR_LE_DEV_RANDOM;
3938 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3939 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3940 &hdev->static_addr);
3941 return 0;
3942 }
3943
3944 /* Neither privacy nor static address is being used so use a
3945 * public address.
3946 */
3947 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3948
3949 return 0;
3950}
3951
Johan Hedberga1f4c312014-02-27 14:05:41 +02003952/* Copy the Identity Address of the controller.
3953 *
3954 * If the controller has a public BD_ADDR, then by default use that one.
3955 * If this is a LE only controller without a public address, default to
3956 * the static random address.
3957 *
3958 * For debugging purposes it is possible to force controllers with a
3959 * public address to use the static random address instead.
3960 */
3961void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3962 u8 *bdaddr_type)
3963{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003964 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003965 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3966 bacpy(bdaddr, &hdev->static_addr);
3967 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3968 } else {
3969 bacpy(bdaddr, &hdev->bdaddr);
3970 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3971 }
3972}
3973
David Herrmann9be0dab2012-04-22 14:39:57 +02003974/* Alloc HCI device */
3975struct hci_dev *hci_alloc_dev(void)
3976{
3977 struct hci_dev *hdev;
3978
Johan Hedberg27f70f32014-07-21 10:50:06 +03003979 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003980 if (!hdev)
3981 return NULL;
3982
David Herrmannb1b813d2012-04-22 14:39:58 +02003983 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3984 hdev->esco_type = (ESCO_HV1);
3985 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003986 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3987 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003988 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003989 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3990 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003991
David Herrmannb1b813d2012-04-22 14:39:58 +02003992 hdev->sniff_max_interval = 800;
3993 hdev->sniff_min_interval = 80;
3994
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003995 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02003996 hdev->le_adv_min_interval = 0x0800;
3997 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003998 hdev->le_scan_interval = 0x0060;
3999 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07004000 hdev->le_conn_min_interval = 0x0028;
4001 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02004002 hdev->le_conn_latency = 0x0000;
4003 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004004
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004005 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01004006 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02004007 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4008 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004009
David Herrmannb1b813d2012-04-22 14:39:58 +02004010 mutex_init(&hdev->lock);
4011 mutex_init(&hdev->req_lock);
4012
4013 INIT_LIST_HEAD(&hdev->mgmt_pending);
4014 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004015 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02004016 INIT_LIST_HEAD(&hdev->uuids);
4017 INIT_LIST_HEAD(&hdev->link_keys);
4018 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004019 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02004020 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004021 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03004022 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03004023 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03004024 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03004025 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02004026
4027 INIT_WORK(&hdev->rx_work, hci_rx_work);
4028 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4029 INIT_WORK(&hdev->tx_work, hci_tx_work);
4030 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02004031
David Herrmannb1b813d2012-04-22 14:39:58 +02004032 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4033 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4034 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4035
David Herrmannb1b813d2012-04-22 14:39:58 +02004036 skb_queue_head_init(&hdev->rx_q);
4037 skb_queue_head_init(&hdev->cmd_q);
4038 skb_queue_head_init(&hdev->raw_q);
4039
4040 init_waitqueue_head(&hdev->req_wait_q);
4041
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004042 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02004043
David Herrmannb1b813d2012-04-22 14:39:58 +02004044 hci_init_sysfs(hdev);
4045 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02004046
4047 return hdev;
4048}
4049EXPORT_SYMBOL(hci_alloc_dev);
4050
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop our device reference; the driver core invokes the
	 * release handler (which frees hdev) once the last reference
	 * is gone.
	 */
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
4058
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059/* Register HCI device */
4060int hci_register_dev(struct hci_dev *hdev)
4061{
David Herrmannb1b813d2012-04-22 14:39:58 +02004062 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004063
Marcel Holtmann74292d52014-07-06 15:50:27 +02004064 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065 return -EINVAL;
4066
Mat Martineau08add512011-11-02 16:18:36 -07004067 /* Do not allow HCI_AMP devices to register at index 0,
4068 * so the index can be used as the AMP controller ID.
4069 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004070 switch (hdev->dev_type) {
4071 case HCI_BREDR:
4072 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4073 break;
4074 case HCI_AMP:
4075 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4076 break;
4077 default:
4078 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004080
Sasha Levin3df92b32012-05-27 22:36:56 +02004081 if (id < 0)
4082 return id;
4083
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084 sprintf(hdev->name, "hci%d", id);
4085 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004086
4087 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4088
Kees Cookd8537542013-07-03 15:04:57 -07004089 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4090 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004091 if (!hdev->workqueue) {
4092 error = -ENOMEM;
4093 goto err;
4094 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004095
Kees Cookd8537542013-07-03 15:04:57 -07004096 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4097 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004098 if (!hdev->req_workqueue) {
4099 destroy_workqueue(hdev->workqueue);
4100 error = -ENOMEM;
4101 goto err;
4102 }
4103
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004104 if (!IS_ERR_OR_NULL(bt_debugfs))
4105 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4106
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004107 dev_set_name(&hdev->dev, "%s", hdev->name);
4108
4109 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004110 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03004111 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004113 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004114 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4115 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004116 if (hdev->rfkill) {
4117 if (rfkill_register(hdev->rfkill) < 0) {
4118 rfkill_destroy(hdev->rfkill);
4119 hdev->rfkill = NULL;
4120 }
4121 }
4122
Johan Hedberg5e130362013-09-13 08:58:17 +03004123 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4124 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4125
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004126 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004127 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004128
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004129 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004130 /* Assume BR/EDR support until proven otherwise (such as
4131 * through reading supported features during init.
4132 */
4133 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4134 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004135
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004136 write_lock(&hci_dev_list_lock);
4137 list_add(&hdev->list, &hci_dev_list);
4138 write_unlock(&hci_dev_list_lock);
4139
Marcel Holtmann4a964402014-07-02 19:10:33 +02004140 /* Devices that are marked for raw-only usage are unconfigured
4141 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004142 */
4143 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004144 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004145
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004147 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004148
Johan Hedberg19202572013-01-14 22:33:51 +02004149 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004150
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004152
David Herrmann33ca9542011-10-08 14:58:49 +02004153err_wqueue:
4154 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004155 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004156err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004157 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004158
David Herrmann33ca9542011-10-08 14:58:49 +02004159 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160}
4161EXPORT_SYMBOL(hci_register_dev);
4162
/* Unregister HCI device
 *
 * Reverses hci_register_dev(): removes the device from the global
 * list, shuts it down, notifies management and listeners, tears down
 * rfkill/sysfs/debugfs/workqueues, clears all per-device state and
 * finally drops the registration reference and the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Flag first so concurrent paths can see the device is going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt for devices that completed setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	/* NULL-safe: the directory may never have been created */
	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all remaining per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4230
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; no device state is
	 * changed here. Always succeeds.
	 */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4238
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Counterpart of hci_suspend_dev(): notification only, no
	 * state change. Always succeeds.
	 */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4246
Marcel Holtmann76bca882009-11-18 00:40:39 +01004247/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004248int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004249{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004250 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004251 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004252 kfree_skb(skb);
4253 return -ENXIO;
4254 }
4255
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004256 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004257 bt_cb(skb)->incoming = 1;
4258
4259 /* Time stamp */
4260 __net_timestamp(skb);
4261
Marcel Holtmann76bca882009-11-18 00:40:39 +01004262 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004263 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004264
Marcel Holtmann76bca882009-11-18 00:40:39 +01004265 return 0;
4266}
4267EXPORT_SYMBOL(hci_recv_frame);
4268
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304269static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004270 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304271{
4272 int len = 0;
4273 int hlen = 0;
4274 int remain = count;
4275 struct sk_buff *skb;
4276 struct bt_skb_cb *scb;
4277
4278 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004279 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304280 return -EILSEQ;
4281
4282 skb = hdev->reassembly[index];
4283
4284 if (!skb) {
4285 switch (type) {
4286 case HCI_ACLDATA_PKT:
4287 len = HCI_MAX_FRAME_SIZE;
4288 hlen = HCI_ACL_HDR_SIZE;
4289 break;
4290 case HCI_EVENT_PKT:
4291 len = HCI_MAX_EVENT_SIZE;
4292 hlen = HCI_EVENT_HDR_SIZE;
4293 break;
4294 case HCI_SCODATA_PKT:
4295 len = HCI_MAX_SCO_SIZE;
4296 hlen = HCI_SCO_HDR_SIZE;
4297 break;
4298 }
4299
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004300 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304301 if (!skb)
4302 return -ENOMEM;
4303
4304 scb = (void *) skb->cb;
4305 scb->expect = hlen;
4306 scb->pkt_type = type;
4307
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304308 hdev->reassembly[index] = skb;
4309 }
4310
4311 while (count) {
4312 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004313 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304314
4315 memcpy(skb_put(skb, len), data, len);
4316
4317 count -= len;
4318 data += len;
4319 scb->expect -= len;
4320 remain = count;
4321
4322 switch (type) {
4323 case HCI_EVENT_PKT:
4324 if (skb->len == HCI_EVENT_HDR_SIZE) {
4325 struct hci_event_hdr *h = hci_event_hdr(skb);
4326 scb->expect = h->plen;
4327
4328 if (skb_tailroom(skb) < scb->expect) {
4329 kfree_skb(skb);
4330 hdev->reassembly[index] = NULL;
4331 return -ENOMEM;
4332 }
4333 }
4334 break;
4335
4336 case HCI_ACLDATA_PKT:
4337 if (skb->len == HCI_ACL_HDR_SIZE) {
4338 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4339 scb->expect = __le16_to_cpu(h->dlen);
4340
4341 if (skb_tailroom(skb) < scb->expect) {
4342 kfree_skb(skb);
4343 hdev->reassembly[index] = NULL;
4344 return -ENOMEM;
4345 }
4346 }
4347 break;
4348
4349 case HCI_SCODATA_PKT:
4350 if (skb->len == HCI_SCO_HDR_SIZE) {
4351 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4352 scb->expect = h->dlen;
4353
4354 if (skb_tailroom(skb) < scb->expect) {
4355 kfree_skb(skb);
4356 hdev->reassembly[index] = NULL;
4357 return -ENOMEM;
4358 }
4359 }
4360 break;
4361 }
4362
4363 if (scb->expect == 0) {
4364 /* Complete frame */
4365
4366 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004367 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304368
4369 hdev->reassembly[index] = NULL;
4370 return remain;
4371 }
4372 }
4373
4374 return remain;
4375}
4376
Marcel Holtmannef222012007-07-11 06:42:04 +02004377int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4378{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304379 int rem = 0;
4380
Marcel Holtmannef222012007-07-11 06:42:04 +02004381 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4382 return -EILSEQ;
4383
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004384 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004385 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304386 if (rem < 0)
4387 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004388
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304389 data += (count - rem);
4390 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004391 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004392
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304393 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004394}
4395EXPORT_SYMBOL(hci_recv_fragment);
4396
Suraj Sumangala99811512010-07-14 13:02:19 +05304397#define STREAM_REASSEMBLY 0
4398
4399int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4400{
4401 int type;
4402 int rem = 0;
4403
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004404 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304405 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4406
4407 if (!skb) {
4408 struct { char type; } *pkt;
4409
4410 /* Start of the frame */
4411 pkt = data;
4412 type = pkt->type;
4413
4414 data++;
4415 count--;
4416 } else
4417 type = bt_cb(skb)->pkt_type;
4418
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004419 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004420 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304421 if (rem < 0)
4422 return rem;
4423
4424 data += (count - rem);
4425 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004426 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304427
4428 return rem;
4429}
4430EXPORT_SYMBOL(hci_recv_stream_fragment);
4431
Linus Torvalds1da177e2005-04-16 15:20:36 -07004432/* ---- Interface to upper protocols ---- */
4433
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434int hci_register_cb(struct hci_cb *cb)
4435{
4436 BT_DBG("%p name %s", cb, cb->name);
4437
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004438 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004439 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004440 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004441
4442 return 0;
4443}
4444EXPORT_SYMBOL(hci_register_cb);
4445
/* Remove a previously registered upper-protocol callback set from the
 * global list. Always succeeds.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4457
Marcel Holtmann51086992013-10-10 14:54:19 -07004458static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004460 int err;
4461
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004462 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004464 /* Time stamp */
4465 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004467 /* Send copy to monitor */
4468 hci_send_to_monitor(hdev, skb);
4469
4470 if (atomic_read(&hdev->promisc)) {
4471 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004472 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473 }
4474
4475 /* Get rid of skb owner, prior to sending to the driver. */
4476 skb_orphan(skb);
4477
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004478 err = hdev->send(hdev, skb);
4479 if (err < 0) {
4480 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4481 kfree_skb(skb);
4482 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483}
4484
Johan Hedberg3119ae92013-03-05 20:37:44 +02004485void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4486{
4487 skb_queue_head_init(&req->cmd_q);
4488 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004489 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004490}
4491
/* Submit a built request: attach the completion callback to the last
 * queued command, splice the request's commands onto the device
 * command queue and kick the command work.
 *
 * Returns 0 on success, the error recorded during request building
 * (after purging the queued commands), or -ENODATA for an empty
 * request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command of the
	 * request.
	 */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4523
Marcel Holtmann899de762014-07-11 05:51:58 +02004524bool hci_req_pending(struct hci_dev *hdev)
4525{
4526 return (hdev->req_status == HCI_REQ_PEND);
4527}
4528
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004529static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004530 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531{
4532 int len = HCI_COMMAND_HDR_SIZE + plen;
4533 struct hci_command_hdr *hdr;
4534 struct sk_buff *skb;
4535
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004537 if (!skb)
4538 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004539
4540 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004541 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542 hdr->plen = plen;
4543
4544 if (plen)
4545 memcpy(skb_put(skb, plen), param, plen);
4546
4547 BT_DBG("skb len %d", skb->len);
4548
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004549 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004550
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004551 return skb;
4552}
4553
/* Send HCI command
 *
 * Builds a stand-alone command packet and queues it on the device
 * command queue for the command work to transmit. Returns 0 or
 * -ENOMEM if the packet could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578
Johan Hedberg71c76a12013-03-05 20:37:46 +02004579/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004580void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4581 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004582{
4583 struct hci_dev *hdev = req->hdev;
4584 struct sk_buff *skb;
4585
4586 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4587
Andre Guedes34739c12013-03-08 11:20:18 -03004588 /* If an error occured during request building, there is no point in
4589 * queueing the HCI command. We can simply return.
4590 */
4591 if (req->err)
4592 return;
4593
Johan Hedberg71c76a12013-03-05 20:37:46 +02004594 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4595 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004596 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4597 hdev->name, opcode);
4598 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004599 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004600 }
4601
4602 if (skb_queue_empty(&req->cmd_q))
4603 bt_cb(skb)->req.start = true;
4604
Johan Hedberg02350a72013-04-03 21:50:29 +03004605 bt_cb(skb)->req.event = event;
4606
Johan Hedberg71c76a12013-03-05 20:37:46 +02004607 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004608}
4609
/* Queue a command to an asynchronous HCI request, expecting the
 * default Command Complete/Status completion (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
4615
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter payload of hdev->sent_cmd when it
 * matches @opcode, or NULL when no command is pending or the opcode
 * differs. The returned pointer aliases hdev->sent_cmd's data and is
 * only valid while that skb is alive.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	/* Skip the command header; the parameters follow it */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
4633
/* Send ACL data */
/* Prepend an HCI ACL header (packed handle+flags, payload length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the header is pushed */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4646
/* Add ACL headers and queue @skb (and any fragments on its frag_list)
 * onto @queue. The first fragment keeps the caller's @flags (ACL_START);
 * continuation fragments are re-flagged as ACL_CONT. Fragments are
 * queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Only the linear part plus the frag_list chain is transmitted */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle even though the first fragment used
			 * chan->handle on HCI_AMP controllers above - confirm
			 * this asymmetry is intended for AMP links.
			 */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
4704
/* Queue ACL data on the channel's data queue and schedule the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715
/* Send SCO data */
/* Prepend an HCI SCO header, queue the skb on the connection's data
 * queue and schedule the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004736
4737/* ---- HCI TX task (outgoing data) ---- */
4738
4739/* HCI Connection scheduler */
/* Pick the connection of link @type that has queued data and the
 * fewest in-flight packets, and compute its fair share of the
 * controller's buffer budget in *quote (budget / ready connections,
 * minimum 1). Returns NULL with *quote = 0 when nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer budget matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4799
/* TX timeout handler: disconnect every connection of link @type that
 * still has unacked packets (c->sent), since the controller appears to
 * have stopped returning completed-packet events for them.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4820
/* Pick the HCI channel to service next for link @type: among the
 * channels whose head skb carries the highest priority, choose the one
 * whose connection has the fewest in-flight packets. *quote receives
 * the fair share of the link's buffer budget (minimum 1).
 * Returns NULL when no channel of this type has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Only channels at the current best priority compete */
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority found: restart the contest */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priority, prefer the least-busy conn */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer budget matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4902
/* Anti-starvation pass over all channels of link @type: channels that
 * transmitted in the last scheduling round get their sent counter
 * reset; channels that did not get served have the priority of their
 * head skb promoted to HCI_PRIO_MAX - 1 so they win the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Served this round: just reset the counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved: promote so it gets scheduled next round */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4952
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004953static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4954{
4955 /* Calculate count of blocks used by this packet */
4956 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4957}
4958
/* Detect a stalled ACL link: if the controller reports no free buffers
 * (@cnt == 0) and nothing completed within HCI_ACL_TX_TIMEOUT of the
 * last transmit, kill the stalled connections. Skipped while the
 * controller is still unconfigured.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969
/* Packet-based ACL scheduler: repeatedly pick the best channel and send
 * up to its quota of packets while controller ACL buffers (acl_cnt)
 * remain, stopping a channel early if a lower-priority skb appears at
 * its head. Ends with a priority recalculation if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
5007
/* Block-based ACL scheduler (flow control counted in data blocks rather
 * than packets, used by AMP controllers): like hci_sched_acl_pkt() but
 * each packet consumes __get_blocks() from hdev->block_cnt and quotas
 * are charged in blocks.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links; BR/EDR schedule ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet would not fit in the remaining budget */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was transmitted: rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
5061
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005062static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005063{
5064 BT_DBG("%s", hdev->name);
5065
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005066 /* No ACL link over BR/EDR controller */
5067 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5068 return;
5069
5070 /* No AMP link over AMP controller */
5071 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005072 return;
5073
5074 switch (hdev->flow_ctl_mode) {
5075 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5076 hci_sched_acl_pkt(hdev);
5077 break;
5078
5079 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5080 hci_sched_acl_blk(hdev);
5081 break;
5082 }
5083}
5084
Linus Torvalds1da177e2005-04-16 15:20:36 -07005085/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005086static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005087{
5088 struct hci_conn *conn;
5089 struct sk_buff *skb;
5090 int quote;
5091
5092 BT_DBG("%s", hdev->name);
5093
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005094 if (!hci_conn_num(hdev, SCO_LINK))
5095 return;
5096
Linus Torvalds1da177e2005-04-16 15:20:36 -07005097 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5098 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5099 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005100 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005101
5102 conn->sent++;
5103 if (conn->sent == ~0)
5104 conn->sent = 0;
5105 }
5106 }
5107}
5108
/* eSCO scheduler: identical strategy to hci_sched_sco() but iterates
 * ESCO_LINK connections (shares the same sco_cnt buffer budget).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection counter instead of
			 * letting it saturate */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
5132
/* LE scheduler: like hci_sched_acl_pkt() but for LE links. Uses the
 * dedicated LE buffer pool (le_cnt) when the controller reports one,
 * otherwise falls back to the shared ACL pool; includes its own
 * stall-timeout check for the LE link.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE buffers if available, otherwise share ACL buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was transmitted: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
5183
/* TX work handler: run every per-link-type scheduler (unless the device
 * is in user-channel mode, where raw access bypasses scheduling) and
 * then flush any queued raw packets straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
5204
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005205/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206
/* ACL data packet */
/* RX path for an incoming ACL packet: strip the ACL header, look up the
 * connection by handle and hand the payload to L2CAP. The skb is
 * consumed (freed here when no matching connection exists).
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
5242
/* SCO data packet */
/* RX path for an incoming SCO packet: strip the SCO header, look up the
 * connection by handle and hand the payload to the SCO layer. The skb
 * is consumed (freed here when no matching connection exists).
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
5273
Johan Hedberg9238f362013-03-05 20:37:48 +02005274static bool hci_req_is_complete(struct hci_dev *hdev)
5275{
5276 struct sk_buff *skb;
5277
5278 skb = skb_peek(&hdev->cmd_q);
5279 if (!skb)
5280 return true;
5281
5282 return bt_cb(skb)->req.start;
5283}
5284
/* Re-queue a clone of the last sent command at the head of cmd_q and
 * kick the command work. Used to recover from controllers that emit a
 * spontaneous Reset complete event; HCI_Reset itself is never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	/* Best effort: silently give up if the clone cannot be allocated */
	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
5306
/* Handle completion of @opcode with @status in the context of an
 * asynchronous request. Decides whether the whole request has finished
 * and, if so, invokes its completion callback exactly once. On a failed
 * command the remaining queued commands of the same request are purged.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Next request's first command: put it back and stop */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5372
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005373static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005374{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005375 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005376 struct sk_buff *skb;
5377
5378 BT_DBG("%s", hdev->name);
5379
Linus Torvalds1da177e2005-04-16 15:20:36 -07005380 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005381 /* Send copy to monitor */
5382 hci_send_to_monitor(hdev, skb);
5383
Linus Torvalds1da177e2005-04-16 15:20:36 -07005384 if (atomic_read(&hdev->promisc)) {
5385 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005386 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005387 }
5388
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005389 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005390 kfree_skb(skb);
5391 continue;
5392 }
5393
5394 if (test_bit(HCI_INIT, &hdev->flags)) {
5395 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005396 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005397 case HCI_ACLDATA_PKT:
5398 case HCI_SCODATA_PKT:
5399 kfree_skb(skb);
5400 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005401 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005402 }
5403
5404 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005405 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005406 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005407 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005408 hci_event_packet(hdev, skb);
5409 break;
5410
5411 case HCI_ACLDATA_PKT:
5412 BT_DBG("%s ACL data packet", hdev->name);
5413 hci_acldata_packet(hdev, skb);
5414 break;
5415
5416 case HCI_SCODATA_PKT:
5417 BT_DBG("%s SCO data packet", hdev->name);
5418 hci_scodata_packet(hdev, skb);
5419 break;
5420
5421 default:
5422 kfree_skb(skb);
5423 break;
5424 }
5425 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005426}
5427
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005428static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005429{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005430 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005431 struct sk_buff *skb;
5432
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005433 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5434 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005437 if (atomic_read(&hdev->cmd_cnt)) {
5438 skb = skb_dequeue(&hdev->cmd_q);
5439 if (!skb)
5440 return;
5441
Wei Yongjun7585b972009-02-25 18:29:52 +08005442 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005443
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005444 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005445 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005447 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005448 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005449 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005450 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005451 schedule_delayed_work(&hdev->cmd_timer,
5452 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005453 } else {
5454 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005455 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005456 }
5457 }
5458}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005459
5460void hci_req_add_le_scan_disable(struct hci_request *req)
5461{
5462 struct hci_cp_le_set_scan_enable cp;
5463
5464 memset(&cp, 0, sizeof(cp));
5465 cp.enable = LE_SCAN_DISABLE;
5466 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5467}
Andre Guedesa4790db2014-02-26 20:21:47 -03005468
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005469static void add_to_white_list(struct hci_request *req,
5470 struct hci_conn_params *params)
5471{
5472 struct hci_cp_le_add_to_white_list cp;
5473
5474 cp.bdaddr_type = params->addr_type;
5475 bacpy(&cp.bdaddr, &params->addr);
5476
5477 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5478}
5479
5480static u8 update_white_list(struct hci_request *req)
5481{
5482 struct hci_dev *hdev = req->hdev;
5483 struct hci_conn_params *params;
5484 struct bdaddr_list *b;
5485 uint8_t white_list_entries = 0;
5486
5487 /* Go through the current white list programmed into the
5488 * controller one by one and check if that address is still
5489 * in the list of pending connections or list of devices to
5490 * report. If not present in either list, then queue the
5491 * command to remove it from the controller.
5492 */
5493 list_for_each_entry(b, &hdev->le_white_list, list) {
5494 struct hci_cp_le_del_from_white_list cp;
5495
5496 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5497 &b->bdaddr, b->bdaddr_type) ||
5498 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5499 &b->bdaddr, b->bdaddr_type)) {
5500 white_list_entries++;
5501 continue;
5502 }
5503
5504 cp.bdaddr_type = b->bdaddr_type;
5505 bacpy(&cp.bdaddr, &b->bdaddr);
5506
5507 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5508 sizeof(cp), &cp);
5509 }
5510
5511 /* Since all no longer valid white list entries have been
5512 * removed, walk through the list of pending connections
5513 * and ensure that any new device gets programmed into
5514 * the controller.
5515 *
5516 * If the list of the devices is larger than the list of
5517 * available white list entries in the controller, then
5518 * just abort and return filer policy value to not use the
5519 * white list.
5520 */
5521 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5522 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5523 &params->addr, params->addr_type))
5524 continue;
5525
5526 if (white_list_entries >= hdev->le_white_list_size) {
5527 /* Select filter policy to accept all advertising */
5528 return 0x00;
5529 }
5530
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005531 if (hci_find_irk_by_addr(hdev, &params->addr,
5532 params->addr_type)) {
5533 /* White list can not be used with RPAs */
5534 return 0x00;
5535 }
5536
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005537 white_list_entries++;
5538 add_to_white_list(req, params);
5539 }
5540
5541 /* After adding all new pending connections, walk through
5542 * the list of pending reports and also add these to the
5543 * white list if there is still space.
5544 */
5545 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5546 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5547 &params->addr, params->addr_type))
5548 continue;
5549
5550 if (white_list_entries >= hdev->le_white_list_size) {
5551 /* Select filter policy to accept all advertising */
5552 return 0x00;
5553 }
5554
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005555 if (hci_find_irk_by_addr(hdev, &params->addr,
5556 params->addr_type)) {
5557 /* White list can not be used with RPAs */
5558 return 0x00;
5559 }
5560
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005561 white_list_entries++;
5562 add_to_white_list(req, params);
5563 }
5564
5565 /* Select filter policy to use white list */
5566 return 0x01;
5567}
5568
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005569void hci_req_add_le_passive_scan(struct hci_request *req)
5570{
5571 struct hci_cp_le_set_scan_param param_cp;
5572 struct hci_cp_le_set_scan_enable enable_cp;
5573 struct hci_dev *hdev = req->hdev;
5574 u8 own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005575 u8 filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005576
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005577 /* Set require_privacy to false since no SCAN_REQ are send
5578 * during passive scanning. Not using an unresolvable address
5579 * here is important so that peer devices using direct
5580 * advertising with our address will be correctly reported
5581 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005582 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005583 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005584 return;
5585
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005586 /* Adding or removing entries from the white list must
5587 * happen before enabling scanning. The controller does
5588 * not allow white list modification while scanning.
5589 */
5590 filter_policy = update_white_list(req);
5591
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005592 memset(&param_cp, 0, sizeof(param_cp));
5593 param_cp.type = LE_SCAN_PASSIVE;
5594 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5595 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5596 param_cp.own_address_type = own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005597 param_cp.filter_policy = filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005598 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5599 &param_cp);
5600
5601 memset(&enable_cp, 0, sizeof(enable_cp));
5602 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005603 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005604 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5605 &enable_cp);
5606}
5607
Andre Guedesa4790db2014-02-26 20:21:47 -03005608static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5609{
5610 if (status)
5611 BT_DBG("HCI request failed to update background scanning: "
5612 "status 0x%2.2x", status);
5613}
5614
/* This function controls the background scanning based on hdev->pend_le_conns
 * and hdev->pend_le_reports. If there are pending LE connections or devices
 * to report, we start the background scanning, otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* Bail out in any state where issuing scan commands is wrong:
	 * device down, still initializing, in setup/config, auto-off
	 * pending, or being unregistered.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	/* Submit the accumulated commands; failure is only logged by the
	 * completion callback since there is nothing to recover here.
	 */
	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
Johan Hedberg432df052014-08-01 11:13:31 +03005687
Johan Hedberg22f433d2014-08-01 11:13:32 +03005688static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5689{
5690 struct bdaddr_list *b;
5691
5692 list_for_each_entry(b, &hdev->whitelist, list) {
5693 struct hci_conn *conn;
5694
5695 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5696 if (!conn)
5697 return true;
5698
5699 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5700 return true;
5701 }
5702
5703 return false;
5704}
5705
Johan Hedberg432df052014-08-01 11:13:31 +03005706void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5707{
5708 u8 scan;
5709
5710 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5711 return;
5712
5713 if (!hdev_is_powered(hdev))
5714 return;
5715
5716 if (mgmt_powering_down(hdev))
5717 return;
5718
5719 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
Johan Hedberg22f433d2014-08-01 11:13:32 +03005720 disconnected_whitelist_entries(hdev))
Johan Hedberg432df052014-08-01 11:13:31 +03005721 scan = SCAN_PAGE;
5722 else
5723 scan = SCAN_DISABLED;
5724
5725 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5726 return;
5727
5728 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5729 scan |= SCAN_INQUIRY;
5730
5731 if (req)
5732 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5733 else
5734 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5735}