/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

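/* Usage sketch for the dut_mode attribute above (hypothetical session;
 * it assumes debugfs is mounted at /sys/kernel/debug and the entry is
 * created under the per-controller bluetooth directory):
 *
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	Y
 *
 * Writing "Y" issues HCI_OP_ENABLE_DUT_MODE synchronously; writing "N"
 * leaves Device Under Test mode by resetting the controller with
 * HCI_OP_RESET. The write fails with -ENETDOWN while the device is down.
 */
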
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

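/* Worked example for the byte reversal in uuids_show(): the 16-bit
 * service UUID 0x1800 expanded with the SIG base UUID is stored
 * little-endian as
 *
 *	fb 34 9b 5f 80 00 00 80 00 10 00 00 00 18 00 00
 *
 * and, after reversing into val[], %pUb prints the familiar form
 * 00001800-0000-1000-8000-00805f9b34fb.
 */
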
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

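/* Note on the sniff interval attributes above: the values are baseband
 * slots of 0.625 ms each, which is why the setters reject odd values
 * and keep min <= max. For example, writing 800 to sniff_max_interval
 * corresponds to a 500 ms sniff interval.
 */
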
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

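/* The conn_info_{min,max}_age pair above is in milliseconds; it bounds
 * how long cached connection information (such as RSSI and TX power
 * reported through the Get Connection Information management command)
 * is treated as fresh before the controller is queried again.
 */
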
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

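/* The LE connection interval bounds above are in units of 1.25 ms, so
 * the accepted range 0x0006-0x0c80 corresponds to 7.5 ms through 4 s,
 * matching the Core Specification limits for the connection interval
 * parameters.
 */
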
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

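/* Units for the two attributes above: the slave latency is a plain
 * count of connection events that may be skipped (0x0000-0x01f3, i.e.
 * up to 499), while the supervision timeout is in 10 ms steps, so
 * 0x000a-0x0c80 spans 100 ms through 32 s.
 */
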
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

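/* The advertising channel map is a bitmask: bit 0 enables channel 37,
 * bit 1 channel 38 and bit 2 channel 39. The range check above allows
 * anything from a single channel (0x01) to all three (0x07), but never
 * an empty map.
 */
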
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

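/* Caller sketch for __hci_cmd_sync(), mirroring its use in
 * dut_mode_write() above; the returned skb carries the Command
 * Complete parameters and must be freed by the caller:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */
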
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

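/* The return value of hci_get_inquiry_mode() is the Write Inquiry Mode
 * parameter: 0x00 selects standard inquiry results, 0x01 results with
 * RSSI, and 0x02 the extended inquiry result format. The hard-coded
 * manufacturer/revision checks whitelist controllers known to support
 * RSSI results without advertising the corresponding LMP feature bit.
 */
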
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

Johan Hedbergd62e6d62013-09-13 11:40:02 +03001577static void hci_set_event_mask_page_2(struct hci_request *req)
1578{
1579 struct hci_dev *hdev = req->hdev;
1580 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1581
 1582	/* If Connectionless Slave Broadcast master role is supported,
1583 * enable all necessary events for it.
1584 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001585 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001586 events[1] |= 0x40; /* Triggered Clock Capture */
1587 events[1] |= 0x80; /* Synchronization Train Complete */
1588 events[2] |= 0x10; /* Slave Page Response Timeout */
1589 events[2] |= 0x20; /* CSB Channel Map Change */
1590 }
1591
 1592	/* If Connectionless Slave Broadcast slave role is supported,
1593 * enable all necessary events for it.
1594 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001595 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001596 events[2] |= 0x01; /* Synchronization Train Received */
1597 events[2] |= 0x02; /* CSB Receive */
1598 events[2] |= 0x04; /* CSB Timeout */
1599 events[2] |= 0x08; /* Truncated Page Complete */
1600 }
1601
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001602 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001603 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001604 events[2] |= 0x80;
1605
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001606 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1607}
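
/* Sketch (not in the original file): the event mask pages are little-endian
 * 64-bit bit fields transported as 8-byte arrays, so a hardcoded update such
 * as "events[2] |= 0x10" above is just bit 20 of the mask. A hypothetical
 * helper makes that mapping explicit:
 */
static inline void example_event_mask_set_bit(u8 events[8], unsigned int bit)
{
	events[bit / 8] |= 1 << (bit % 8);	/* bit 20 -> events[2] |= 0x10 */
}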
1608
Johan Hedberg42c6b122013-03-05 20:37:49 +02001609static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001610{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001611 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001612 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001613
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001614 /* Some Broadcom based Bluetooth controllers do not support the
 1615	 * Delete Stored Link Key command. They clearly indicate its absence
 1616	 * in the bit mask of supported commands.
 1617	 *
 1618	 * Check the supported commands and send the command only if it is
 1619	 * marked as supported. If it is not supported, assume that the
 1620	 * controller has no actual support for stored link keys, which makes
 1621	 * this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001622	 *
 1623	 * Some controllers indicate that they support handling the deletion
 1624	 * of stored link keys, but they don't. The quirk lets a driver just
 1625	 * disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001626 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001627 if (hdev->commands[6] & 0x80 &&
1628 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001629 struct hci_cp_delete_stored_link_key cp;
1630
1631 bacpy(&cp.bdaddr, BDADDR_ANY);
1632 cp.delete_all = 0x01;
1633 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1634 sizeof(cp), &cp);
1635 }
1636
Johan Hedberg2177bab2013-03-05 20:37:43 +02001637 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001638 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001639
Andre Guedes9193c6e2014-07-01 18:10:09 -03001640 if (lmp_le_capable(hdev)) {
1641 u8 events[8];
1642
1643 memset(events, 0, sizeof(events));
1644 events[0] = 0x1f;
Andre Guedes662bc2e2014-07-01 18:10:10 -03001645
1646 /* If controller supports the Connection Parameters Request
1647 * Link Layer Procedure, enable the corresponding event.
1648 */
1649 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1650 events[0] |= 0x20; /* LE Remote Connection
1651 * Parameter Request
1652 */
1653
Andre Guedes9193c6e2014-07-01 18:10:09 -03001654 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1655 events);
1656
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001657 if (hdev->commands[25] & 0x40) {
1658 /* Read LE Advertising Channel TX Power */
1659 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1660 }
1661
Johan Hedberg42c6b122013-03-05 20:37:49 +02001662 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001663 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001664
1665 /* Read features beyond page 1 if available */
1666 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1667 struct hci_cp_read_local_ext_features cp;
1668
1669 cp.page = p;
1670 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1671 sizeof(cp), &cp);
1672 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001673}
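
/* Sketch, assuming the layout documented in the Core specification:
 * hdev->commands holds the Read Local Supported Commands bit mask, where
 * bit N of octet M flags one opcode. Checks such as "hdev->commands[6] &
 * 0x80" (Delete Stored Link Key) or "hdev->commands[5] & 0x10" above follow
 * this pattern; a hypothetical helper would read:
 */
static inline bool example_hci_cmd_supported(struct hci_dev *hdev,
					     unsigned int octet,
					     unsigned int bit)
{
	return hdev->commands[octet] & (1 << bit);
}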
1674
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001675static void hci_init4_req(struct hci_request *req, unsigned long opt)
1676{
1677 struct hci_dev *hdev = req->hdev;
1678
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001679 /* Set event mask page 2 if the HCI command for it is supported */
1680 if (hdev->commands[22] & 0x04)
1681 hci_set_event_mask_page_2(req);
1682
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001683 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001684 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001685 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001686
1687 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001688 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001689 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001690 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1691 u8 support = 0x01;
1692 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1693 sizeof(support), &support);
1694 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001695}
1696
Johan Hedberg2177bab2013-03-05 20:37:43 +02001697static int __hci_init(struct hci_dev *hdev)
1698{
1699 int err;
1700
1701 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1702 if (err < 0)
1703 return err;
1704
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001705 /* The Device Under Test (DUT) mode is special and available for
1706 * all controller types. So just create it early on.
1707 */
1708 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1709 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1710 &dut_mode_fops);
1711 }
1712
Johan Hedberg2177bab2013-03-05 20:37:43 +02001713	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
1714 * BR/EDR/LE type controllers. AMP controllers only need the
1715 * first stage init.
1716 */
1717 if (hdev->dev_type != HCI_BREDR)
1718 return 0;
1719
1720 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1721 if (err < 0)
1722 return err;
1723
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001724 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1725 if (err < 0)
1726 return err;
1727
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001728 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1729 if (err < 0)
1730 return err;
1731
1732 /* Only create debugfs entries during the initial setup
1733 * phase and not every time the controller gets powered on.
1734 */
1735 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1736 return 0;
1737
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001738 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1739 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001740 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1741 &hdev->manufacturer);
1742 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1743 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001744 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1745 &blacklist_fops);
Johan Hedberg66593582014-07-09 12:59:14 +03001746 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1747 &whitelist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001748 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1749
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001750 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1751 &conn_info_min_age_fops);
1752 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1753 &conn_info_max_age_fops);
1754
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001755 if (lmp_bredr_capable(hdev)) {
1756 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1757 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001758 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1759 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001760 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1761 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001762 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1763 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001764 }
1765
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001766 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001767 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1768 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001769 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1770 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001771 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1772 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001773 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001774
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001775 if (lmp_sniff_capable(hdev)) {
1776 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1777 hdev, &idle_timeout_fops);
1778 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1779 hdev, &sniff_min_interval_fops);
1780 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1781 hdev, &sniff_max_interval_fops);
1782 }
1783
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001784 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001785 debugfs_create_file("identity", 0400, hdev->debugfs,
1786 hdev, &identity_fops);
1787 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1788 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001789 debugfs_create_file("random_address", 0444, hdev->debugfs,
1790 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001791 debugfs_create_file("static_address", 0444, hdev->debugfs,
1792 hdev, &static_address_fops);
1793
1794 /* For controllers with a public address, provide a debug
1795 * option to force the usage of the configured static
1796 * address. By default the public address is used.
1797 */
1798 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1799 debugfs_create_file("force_static_address", 0644,
1800 hdev->debugfs, hdev,
1801 &force_static_address_fops);
1802
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001803 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1804 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001805 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1806 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001807 debugfs_create_file("identity_resolving_keys", 0400,
1808 hdev->debugfs, hdev,
1809 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001810 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1811 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001812 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1813 hdev, &conn_min_interval_fops);
1814 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1815 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001816 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1817 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001818 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1819 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001820 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1821 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001822 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1823 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001824 debugfs_create_u16("discov_interleaved_timeout", 0644,
1825 hdev->debugfs,
1826 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001827 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001828
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001829 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001830}
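
/* Sketch of the pattern behind the *_fops referenced above (assumption:
 * <linux/seq_file.h> is reachable here; all example_* names are made up).
 * Each read-only debugfs attribute pairs a show routine with the usual
 * single_open() boilerplate:
 */
static int example_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	seq_printf(f, "0x%4.4x\n", hdev->manufacturer);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};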
1831
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001832static void hci_init0_req(struct hci_request *req, unsigned long opt)
1833{
1834 struct hci_dev *hdev = req->hdev;
1835
1836 BT_DBG("%s %ld", hdev->name, opt);
1837
1838 /* Reset */
1839 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1840 hci_reset_req(req, 0);
1841
1842 /* Read Local Version */
1843 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1844
1845 /* Read BD Address */
1846 if (hdev->set_bdaddr)
1847 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1848}
1849
1850static int __hci_unconf_init(struct hci_dev *hdev)
1851{
1852 int err;
1853
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001854 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1855 return 0;
1856
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001857 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1858 if (err < 0)
1859 return err;
1860
1861 return 0;
1862}
1863
Johan Hedberg42c6b122013-03-05 20:37:49 +02001864static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865{
1866 __u8 scan = opt;
1867
Johan Hedberg42c6b122013-03-05 20:37:49 +02001868 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869
1870 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001871 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872}
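
/* Illustration only: small builders like hci_scan_req() are consumed
 * through hci_req_sync(), mirroring the HCISETSCAN ioctl path further
 * below. The wrapper name is hypothetical.
 */
static int example_enable_scans(struct hci_dev *hdev)
{
	/* opt is handed through to hci_scan_req() as the scan mode */
	return hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
			    HCI_INIT_TIMEOUT);
}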
1873
Johan Hedberg42c6b122013-03-05 20:37:49 +02001874static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875{
1876 __u8 auth = opt;
1877
Johan Hedberg42c6b122013-03-05 20:37:49 +02001878 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879
1880 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001881 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882}
1883
Johan Hedberg42c6b122013-03-05 20:37:49 +02001884static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885{
1886 __u8 encrypt = opt;
1887
Johan Hedberg42c6b122013-03-05 20:37:49 +02001888 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001890 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001891 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892}
1893
Johan Hedberg42c6b122013-03-05 20:37:49 +02001894static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001895{
1896 __le16 policy = cpu_to_le16(opt);
1897
Johan Hedberg42c6b122013-03-05 20:37:49 +02001898 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001899
1900 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001901 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001902}
1903
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001904/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 * Device is held on return. */
1906struct hci_dev *hci_dev_get(int index)
1907{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001908 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909
1910 BT_DBG("%d", index);
1911
1912 if (index < 0)
1913 return NULL;
1914
1915 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001916 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 if (d->id == index) {
1918 hdev = hci_dev_hold(d);
1919 break;
1920 }
1921 }
1922 read_unlock(&hci_dev_list_lock);
1923 return hdev;
1924}
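
/* Sketch: every successful hci_dev_get() must be balanced by hci_dev_put(),
 * exactly as the ioctl helpers below do. The function name is hypothetical.
 */
static int example_with_hdev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	BT_DBG("borrowed reference to %s", hdev->name);

	hci_dev_put(hdev);
	return 0;
}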
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925
1926/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001927
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001928bool hci_discovery_active(struct hci_dev *hdev)
1929{
1930 struct discovery_state *discov = &hdev->discovery;
1931
Andre Guedes6fbe1952012-02-03 17:47:58 -03001932 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001933 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001934 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001935 return true;
1936
Andre Guedes6fbe1952012-02-03 17:47:58 -03001937 default:
1938 return false;
1939 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001940}
1941
Johan Hedbergff9ef572012-01-04 14:23:45 +02001942void hci_discovery_set_state(struct hci_dev *hdev, int state)
1943{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001944 int old_state = hdev->discovery.state;
1945
Johan Hedbergff9ef572012-01-04 14:23:45 +02001946 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1947
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001948 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001949 return;
1950
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001951 hdev->discovery.state = state;
1952
Johan Hedbergff9ef572012-01-04 14:23:45 +02001953 switch (state) {
1954 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001955 hci_update_background_scan(hdev);
1956
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001957 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001958 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001959 break;
1960 case DISCOVERY_STARTING:
1961 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001962 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001963 mgmt_discovering(hdev, 1);
1964 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001965 case DISCOVERY_RESOLVING:
1966 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001967 case DISCOVERY_STOPPING:
1968 break;
1969 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001970}
1971
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001972void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973{
Johan Hedberg30883512012-01-04 14:16:21 +02001974 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001975 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
Johan Hedberg561aafb2012-01-04 13:31:59 +02001977 list_for_each_entry_safe(p, n, &cache->all, all) {
1978 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001979 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001981
1982 INIT_LIST_HEAD(&cache->unknown);
1983 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984}
1985
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001986struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1987 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988{
Johan Hedberg30883512012-01-04 14:16:21 +02001989 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 struct inquiry_entry *e;
1991
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001992 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993
Johan Hedberg561aafb2012-01-04 13:31:59 +02001994 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001996 return e;
1997 }
1998
1999 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000}
2001
Johan Hedberg561aafb2012-01-04 13:31:59 +02002002struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002003 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002004{
Johan Hedberg30883512012-01-04 14:16:21 +02002005 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002006 struct inquiry_entry *e;
2007
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002008 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002009
2010 list_for_each_entry(e, &cache->unknown, list) {
2011 if (!bacmp(&e->data.bdaddr, bdaddr))
2012 return e;
2013 }
2014
2015 return NULL;
2016}
2017
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002018struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002019 bdaddr_t *bdaddr,
2020 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002021{
2022 struct discovery_state *cache = &hdev->discovery;
2023 struct inquiry_entry *e;
2024
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002025 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002026
2027 list_for_each_entry(e, &cache->resolve, list) {
2028 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2029 return e;
2030 if (!bacmp(&e->data.bdaddr, bdaddr))
2031 return e;
2032 }
2033
2034 return NULL;
2035}
2036
Johan Hedberga3d4e202012-01-09 00:53:02 +02002037void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002038 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002039{
2040 struct discovery_state *cache = &hdev->discovery;
2041 struct list_head *pos = &cache->resolve;
2042 struct inquiry_entry *p;
2043
2044 list_del(&ie->list);
2045
2046 list_for_each_entry(p, &cache->resolve, list) {
2047 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002048 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002049 break;
2050 pos = &p->list;
2051 }
2052
2053 list_add(&ie->list, pos);
2054}
2055
Marcel Holtmannaf589252014-07-01 14:11:20 +02002056u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2057 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058{
Johan Hedberg30883512012-01-04 14:16:21 +02002059 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002060 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002061 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002063 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064
Szymon Janc2b2fec42012-11-20 11:38:54 +01002065 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2066
Marcel Holtmannaf589252014-07-01 14:11:20 +02002067 if (!data->ssp_mode)
2068 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002069
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002070 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002071 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002072 if (!ie->data.ssp_mode)
2073 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002074
Johan Hedberga3d4e202012-01-09 00:53:02 +02002075 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002076 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002077 ie->data.rssi = data->rssi;
2078 hci_inquiry_cache_update_resolve(hdev, ie);
2079 }
2080
Johan Hedberg561aafb2012-01-04 13:31:59 +02002081 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002082 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002083
Johan Hedberg561aafb2012-01-04 13:31:59 +02002084 /* Entry not in the cache. Add new one. */
2085 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002086 if (!ie) {
2087 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2088 goto done;
2089 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002090
2091 list_add(&ie->all, &cache->all);
2092
2093 if (name_known) {
2094 ie->name_state = NAME_KNOWN;
2095 } else {
2096 ie->name_state = NAME_NOT_KNOWN;
2097 list_add(&ie->list, &cache->unknown);
2098 }
2099
2100update:
2101 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002102 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002103 ie->name_state = NAME_KNOWN;
2104 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 }
2106
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002107 memcpy(&ie->data, data, sizeof(*data));
2108 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002110
2111 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002112 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002113
Marcel Holtmannaf589252014-07-01 14:11:20 +02002114done:
2115 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116}
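
/* Caller-side sketch (the real consumers are the inquiry result handlers
 * in hci_event.c; this wrapper and its name are illustrative only). The
 * returned flags tell the management interface whether name resolution or
 * legacy pairing handling is still required:
 */
static u32 example_process_inquiry_result(struct hci_dev *hdev,
					  struct inquiry_data *data)
{
	u32 flags;

	hci_dev_lock(hdev);
	/* name_known is false at inquiry result time; the remote name
	 * only becomes known after a later Remote Name Request.
	 */
	flags = hci_inquiry_cache_update(hdev, data, false);
	hci_dev_unlock(hdev);

	return flags;
}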
2117
2118static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2119{
Johan Hedberg30883512012-01-04 14:16:21 +02002120 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 struct inquiry_info *info = (struct inquiry_info *) buf;
2122 struct inquiry_entry *e;
2123 int copied = 0;
2124
Johan Hedberg561aafb2012-01-04 13:31:59 +02002125 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002127
2128 if (copied >= num)
2129 break;
2130
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 bacpy(&info->bdaddr, &data->bdaddr);
2132 info->pscan_rep_mode = data->pscan_rep_mode;
2133 info->pscan_period_mode = data->pscan_period_mode;
2134 info->pscan_mode = data->pscan_mode;
2135 memcpy(info->dev_class, data->dev_class, 3);
2136 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002137
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002139 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 }
2141
2142 BT_DBG("cache %p, copied %d", cache, copied);
2143 return copied;
2144}
2145
Johan Hedberg42c6b122013-03-05 20:37:49 +02002146static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147{
2148 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002149 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 struct hci_cp_inquiry cp;
2151
2152 BT_DBG("%s", hdev->name);
2153
2154 if (test_bit(HCI_INQUIRY, &hdev->flags))
2155 return;
2156
2157 /* Start Inquiry */
2158 memcpy(&cp.lap, &ir->lap, 3);
2159 cp.length = ir->length;
2160 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002161 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162}
2163
Andre Guedes3e13fa12013-03-27 20:04:56 -03002164static int wait_inquiry(void *word)
2165{
2166 schedule();
2167 return signal_pending(current);
2168}
2169
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170int hci_inquiry(void __user *arg)
2171{
2172 __u8 __user *ptr = arg;
2173 struct hci_inquiry_req ir;
2174 struct hci_dev *hdev;
2175 int err = 0, do_inquiry = 0, max_rsp;
2176 long timeo;
2177 __u8 *buf;
2178
2179 if (copy_from_user(&ir, ptr, sizeof(ir)))
2180 return -EFAULT;
2181
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002182 hdev = hci_dev_get(ir.dev_id);
2183 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 return -ENODEV;
2185
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002186 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2187 err = -EBUSY;
2188 goto done;
2189 }
2190
Marcel Holtmann4a964402014-07-02 19:10:33 +02002191 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002192 err = -EOPNOTSUPP;
2193 goto done;
2194 }
2195
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002196 if (hdev->dev_type != HCI_BREDR) {
2197 err = -EOPNOTSUPP;
2198 goto done;
2199 }
2200
Johan Hedberg56f87902013-10-02 13:43:13 +03002201 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2202 err = -EOPNOTSUPP;
2203 goto done;
2204 }
2205
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002206 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002207 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002208 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002209 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 do_inquiry = 1;
2211 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002212 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213
Marcel Holtmann04837f62006-07-03 10:02:33 +02002214 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002215
2216 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002217 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2218 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002219 if (err < 0)
2220 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002221
2222 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2223 * cleared). If it is interrupted by a signal, return -EINTR.
2224 */
2225 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2226 TASK_INTERRUPTIBLE))
2227 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002228 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002230	/* For an unlimited number of responses we will use a buffer with
2231 * 255 entries
2232 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2234
2235 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2236 * copy it to the user space.
2237 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002238 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002239 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 err = -ENOMEM;
2241 goto done;
2242 }
2243
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002244 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002246 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
2248 BT_DBG("num_rsp %d", ir.num_rsp);
2249
2250 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2251 ptr += sizeof(ir);
2252 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002253 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002255 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 err = -EFAULT;
2257
2258 kfree(buf);
2259
2260done:
2261 hci_dev_put(hdev);
2262 return err;
2263}
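
/* Userspace-side illustration (assumption: dd is a raw HCI socket opened
 * via socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI); this is not kernel
 * code). The ioctl implemented above is typically driven like this:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;		// 8 * 1.28s inquiry window
 *	buf.ir.num_rsp = 255;		// 0 would also mean unlimited
 *	memcpy(buf.ir.lap, (uint8_t[]){ 0x33, 0x8b, 0x9e }, 3);	// GIAC
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */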
2264
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002265static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 int ret = 0;
2268
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 BT_DBG("%s %p", hdev->name, hdev);
2270
2271 hci_req_lock(hdev);
2272
Johan Hovold94324962012-03-15 14:48:41 +01002273 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2274 ret = -ENODEV;
2275 goto done;
2276 }
2277
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002278 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2279 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002280 /* Check for rfkill but allow the HCI setup stage to
2281 * proceed (which in itself doesn't cause any RF activity).
2282 */
2283 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2284 ret = -ERFKILL;
2285 goto done;
2286 }
2287
 2288	/* Check for a valid public address or a configured static
 2289	 * random address, but let the HCI setup proceed to
2290 * be able to determine if there is a public address
2291 * or not.
2292 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002293 * In case of user channel usage, it is not important
2294 * if a public address or static random address is
2295 * available.
2296 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002297 * This check is only valid for BR/EDR controllers
2298 * since AMP controllers do not have an address.
2299 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002300 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2301 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002302 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2303 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2304 ret = -EADDRNOTAVAIL;
2305 goto done;
2306 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002307 }
2308
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309 if (test_bit(HCI_UP, &hdev->flags)) {
2310 ret = -EALREADY;
2311 goto done;
2312 }
2313
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 if (hdev->open(hdev)) {
2315 ret = -EIO;
2316 goto done;
2317 }
2318
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002319 atomic_set(&hdev->cmd_cnt, 1);
2320 set_bit(HCI_INIT, &hdev->flags);
2321
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002322 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2323 if (hdev->setup)
2324 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002325
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002326 /* The transport driver can set these quirks before
2327 * creating the HCI device or in its setup callback.
2328 *
2329 * In case any of them is set, the controller has to
2330 * start up as unconfigured.
2331 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002332 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2333 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002334 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002335
2336 /* For an unconfigured controller it is required to
2337 * read at least the version information provided by
2338 * the Read Local Version Information command.
2339 *
2340 * If the set_bdaddr driver callback is provided, then
2341 * also the original Bluetooth public device address
2342 * will be read using the Read BD Address command.
2343 */
2344 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2345 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002346 }
2347
Marcel Holtmann9713c172014-07-06 12:11:15 +02002348 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2349 /* If public address change is configured, ensure that
2350 * the address gets programmed. If the driver does not
2351 * support changing the public address, fail the power
2352 * on procedure.
2353 */
2354 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2355 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002356 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2357 else
2358 ret = -EADDRNOTAVAIL;
2359 }
2360
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002361 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002362 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002363 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002364 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 }
2366
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002367 clear_bit(HCI_INIT, &hdev->flags);
2368
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 if (!ret) {
2370 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002371 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 set_bit(HCI_UP, &hdev->flags);
2373 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002374 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002375 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002376 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002377 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002378 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002379 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002380 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002381 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002382 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002383 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002385 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002386 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002387 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388
2389 skb_queue_purge(&hdev->cmd_q);
2390 skb_queue_purge(&hdev->rx_q);
2391
2392 if (hdev->flush)
2393 hdev->flush(hdev);
2394
2395 if (hdev->sent_cmd) {
2396 kfree_skb(hdev->sent_cmd);
2397 hdev->sent_cmd = NULL;
2398 }
2399
2400 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002401 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402 }
2403
2404done:
2405 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 return ret;
2407}
2408
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002409/* ---- HCI ioctl helpers ---- */
2410
2411int hci_dev_open(__u16 dev)
2412{
2413 struct hci_dev *hdev;
2414 int err;
2415
2416 hdev = hci_dev_get(dev);
2417 if (!hdev)
2418 return -ENODEV;
2419
Marcel Holtmann4a964402014-07-02 19:10:33 +02002420 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002421 * up as user channel. Trying to bring them up as normal devices
 2422	 * will result in a failure. Only user channel operation is
2423 * possible.
2424 *
2425 * When this function is called for a user channel, the flag
2426 * HCI_USER_CHANNEL will be set first before attempting to
2427 * open the device.
2428 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002429 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002430 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2431 err = -EOPNOTSUPP;
2432 goto done;
2433 }
2434
Johan Hedberge1d08f42013-10-01 22:44:50 +03002435 /* We need to ensure that no other power on/off work is pending
2436 * before proceeding to call hci_dev_do_open. This is
2437 * particularly important if the setup procedure has not yet
2438 * completed.
2439 */
2440 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2441 cancel_delayed_work(&hdev->power_off);
2442
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002443 /* After this call it is guaranteed that the setup procedure
2444 * has finished. This means that error conditions like RFKILL
2445 * or no valid public or static random address apply.
2446 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002447 flush_workqueue(hdev->req_workqueue);
2448
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002449 /* For controllers not using the management interface and that
 2450	 * are brought up via the legacy ioctl, set the HCI_PAIRABLE bit
2451 * so that pairing works for them. Once the management interface
2452 * is in use this bit will be cleared again and userspace has
2453 * to explicitly enable it.
2454 */
2455 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2456 !test_bit(HCI_MGMT, &hdev->dev_flags))
2457 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2458
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002459 err = hci_dev_do_open(hdev);
2460
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002461done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002462 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002463 return err;
2464}
2465
Johan Hedbergd7347f32014-07-04 12:37:23 +03002466/* This function requires the caller holds hdev->lock */
2467static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2468{
2469 struct hci_conn_params *p;
2470
2471 list_for_each_entry(p, &hdev->le_conn_params, list)
2472 list_del_init(&p->action);
2473
2474 BT_DBG("All LE pending actions cleared");
2475}
2476
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477static int hci_dev_do_close(struct hci_dev *hdev)
2478{
2479 BT_DBG("%s %p", hdev->name, hdev);
2480
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002481 cancel_delayed_work(&hdev->power_off);
2482
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 hci_req_cancel(hdev, ENODEV);
2484 hci_req_lock(hdev);
2485
2486 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002487 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 hci_req_unlock(hdev);
2489 return 0;
2490 }
2491
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002492 /* Flush RX and TX works */
2493 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002494 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002496 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002497 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002498 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002499 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002500 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002501 }
2502
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002503 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002504 cancel_delayed_work(&hdev->service_cache);
2505
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002506 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002507
2508 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2509 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002510
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002511 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002512 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 hci_conn_hash_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002514 hci_pend_le_actions_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002515 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516
2517 hci_notify(hdev, HCI_DEV_DOWN);
2518
2519 if (hdev->flush)
2520 hdev->flush(hdev);
2521
2522 /* Reset device */
2523 skb_queue_purge(&hdev->cmd_q);
2524 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002525 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2526 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002527 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002529 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 clear_bit(HCI_INIT, &hdev->flags);
2531 }
2532
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002533 /* flush cmd work */
2534 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535
2536 /* Drop queues */
2537 skb_queue_purge(&hdev->rx_q);
2538 skb_queue_purge(&hdev->cmd_q);
2539 skb_queue_purge(&hdev->raw_q);
2540
2541 /* Drop last sent command */
2542 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002543 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 kfree_skb(hdev->sent_cmd);
2545 hdev->sent_cmd = NULL;
2546 }
2547
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002548 kfree_skb(hdev->recv_evt);
2549 hdev->recv_evt = NULL;
2550
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551 /* After this point our queues are empty
2552 * and no tasks are scheduled. */
2553 hdev->close(hdev);
2554
Johan Hedberg35b973c2013-03-15 17:06:59 -05002555 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002556 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002557 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2558
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002559 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2560 if (hdev->dev_type == HCI_BREDR) {
2561 hci_dev_lock(hdev);
2562 mgmt_powered(hdev, 0);
2563 hci_dev_unlock(hdev);
2564 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002565 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002566
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002567 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002568 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002569
Johan Hedberge59fda82012-02-22 18:11:53 +02002570 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002571 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002572 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002573
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574 hci_req_unlock(hdev);
2575
2576 hci_dev_put(hdev);
2577 return 0;
2578}
2579
2580int hci_dev_close(__u16 dev)
2581{
2582 struct hci_dev *hdev;
2583 int err;
2584
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002585 hdev = hci_dev_get(dev);
2586 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002588
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002589 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2590 err = -EBUSY;
2591 goto done;
2592 }
2593
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002594 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2595 cancel_delayed_work(&hdev->power_off);
2596
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002598
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002599done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 hci_dev_put(hdev);
2601 return err;
2602}
2603
2604int hci_dev_reset(__u16 dev)
2605{
2606 struct hci_dev *hdev;
2607 int ret = 0;
2608
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002609 hdev = hci_dev_get(dev);
2610 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 return -ENODEV;
2612
2613 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
Marcel Holtmann808a0492013-08-26 20:57:58 -07002615 if (!test_bit(HCI_UP, &hdev->flags)) {
2616 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002618 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002620 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2621 ret = -EBUSY;
2622 goto done;
2623 }
2624
Marcel Holtmann4a964402014-07-02 19:10:33 +02002625 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002626 ret = -EOPNOTSUPP;
2627 goto done;
2628 }
2629
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 /* Drop queues */
2631 skb_queue_purge(&hdev->rx_q);
2632 skb_queue_purge(&hdev->cmd_q);
2633
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002634 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002635 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002637 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638
2639 if (hdev->flush)
2640 hdev->flush(hdev);
2641
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002642 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002643 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002645 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646
2647done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 hci_req_unlock(hdev);
2649 hci_dev_put(hdev);
2650 return ret;
2651}
2652
2653int hci_dev_reset_stat(__u16 dev)
2654{
2655 struct hci_dev *hdev;
2656 int ret = 0;
2657
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002658 hdev = hci_dev_get(dev);
2659 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 return -ENODEV;
2661
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002662 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2663 ret = -EBUSY;
2664 goto done;
2665 }
2666
Marcel Holtmann4a964402014-07-02 19:10:33 +02002667 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002668 ret = -EOPNOTSUPP;
2669 goto done;
2670 }
2671
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2673
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002674done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676 return ret;
2677}
2678
Johan Hedberg123abc02014-07-10 12:09:07 +03002679static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2680{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002681 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002682
2683 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2684
2685 if ((scan & SCAN_PAGE))
2686 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2687 &hdev->dev_flags);
2688 else
2689 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2690 &hdev->dev_flags);
2691
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002692 if ((scan & SCAN_INQUIRY)) {
2693 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2694 &hdev->dev_flags);
2695 } else {
2696 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2697 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2698 &hdev->dev_flags);
2699 }
2700
Johan Hedberg123abc02014-07-10 12:09:07 +03002701 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2702 return;
2703
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002704 if (conn_changed || discov_changed) {
2705 /* In case this was disabled through mgmt */
2706 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2707
2708 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2709 mgmt_update_adv_data(hdev);
2710
Johan Hedberg123abc02014-07-10 12:09:07 +03002711 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002712 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002713}
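
/* Sketch: the scan argument is the raw Write Scan Enable value, so
 * SCAN_PAGE (0x02) maps to "connectable" and SCAN_INQUIRY (0x01) to
 * "discoverable", while SCAN_DISABLED (0x00) clears both flags above.
 * Hypothetical helpers spelling that out:
 */
static inline bool example_scan_connectable(u8 scan)
{
	return scan & SCAN_PAGE;
}

static inline bool example_scan_discoverable(u8 scan)
{
	return scan & SCAN_INQUIRY;
}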
2714
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715int hci_dev_cmd(unsigned int cmd, void __user *arg)
2716{
2717 struct hci_dev *hdev;
2718 struct hci_dev_req dr;
2719 int err = 0;
2720
2721 if (copy_from_user(&dr, arg, sizeof(dr)))
2722 return -EFAULT;
2723
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002724 hdev = hci_dev_get(dr.dev_id);
2725 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 return -ENODEV;
2727
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002728 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2729 err = -EBUSY;
2730 goto done;
2731 }
2732
Marcel Holtmann4a964402014-07-02 19:10:33 +02002733 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002734 err = -EOPNOTSUPP;
2735 goto done;
2736 }
2737
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002738 if (hdev->dev_type != HCI_BREDR) {
2739 err = -EOPNOTSUPP;
2740 goto done;
2741 }
2742
Johan Hedberg56f87902013-10-02 13:43:13 +03002743 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2744 err = -EOPNOTSUPP;
2745 goto done;
2746 }
2747
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748 switch (cmd) {
2749 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002750 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2751 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 break;
2753
2754 case HCISETENCRYPT:
2755 if (!lmp_encrypt_capable(hdev)) {
2756 err = -EOPNOTSUPP;
2757 break;
2758 }
2759
2760 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2761 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002762 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2763 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 if (err)
2765 break;
2766 }
2767
Johan Hedberg01178cd2013-03-05 20:37:41 +02002768 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2769 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 break;
2771
2772 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002773 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2774 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002775
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002776 /* Ensure that the connectable and discoverable states
2777 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002778 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002779 if (!err)
2780 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 break;
2782
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002783 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002784 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2785 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002786 break;
2787
2788 case HCISETLINKMODE:
2789 hdev->link_mode = ((__u16) dr.dev_opt) &
2790 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2791 break;
2792
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 case HCISETPTYPE:
2794 hdev->pkt_type = (__u16) dr.dev_opt;
2795 break;
2796
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002798 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2799 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 break;
2801
2802 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002803 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2804 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 break;
2806
2807 default:
2808 err = -EINVAL;
2809 break;
2810 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002811
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002812done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 hci_dev_put(hdev);
2814 return err;
2815}
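
/* Userspace-side illustration (assumptions: little-endian host, names made
 * up; this is not kernel code): HCISETACLMTU and HCISETSCOMTU expect the
 * MTU in the upper 16 bits of dev_opt and the packet count in the lower
 * 16 bits, matching the pointer arithmetic above.
 */
static inline __u32 example_pack_mtu_opt(__u16 mtu, __u16 pkts)
{
	return ((__u32) mtu << 16) | pkts;
}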
2816
2817int hci_get_dev_list(void __user *arg)
2818{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002819 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 struct hci_dev_list_req *dl;
2821 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822 int n = 0, size, err;
2823 __u16 dev_num;
2824
2825 if (get_user(dev_num, (__u16 __user *) arg))
2826 return -EFAULT;
2827
2828 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2829 return -EINVAL;
2830
2831 size = sizeof(*dl) + dev_num * sizeof(*dr);
2832
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002833 dl = kzalloc(size, GFP_KERNEL);
2834 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 return -ENOMEM;
2836
2837 dr = dl->dev_req;
2838
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002839 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002840 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002841 unsigned long flags = hdev->flags;
2842
 2843		/* When the auto-off is configured, it means the transport
2844 * is running, but in that case still indicate that the
2845 * device is actually down.
2846 */
2847 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2848 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002849
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002851 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002852
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853 if (++n >= dev_num)
2854 break;
2855 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002856 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857
2858 dl->dev_num = n;
2859 size = sizeof(*dl) + n * sizeof(*dr);
2860
2861 err = copy_to_user(arg, dl, size);
2862 kfree(dl);
2863
2864 return err ? -EFAULT : 0;
2865}
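
/* A minimal userspace sketch (illustrative only, not part of this file)
 * of how the device list is typically fetched through a raw HCI socket:
 *
 *	struct hci_dev_list_req *dl;
 *	int i, sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = malloc(sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	if (sk >= 0 && ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n", dl->dev_req[i].dev_id,
 *			       dl->dev_req[i].dev_opt);
 */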
2866
2867int hci_get_dev_info(void __user *arg)
2868{
2869 struct hci_dev *hdev;
2870 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002871 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872 int err = 0;
2873
2874 if (copy_from_user(&di, arg, sizeof(di)))
2875 return -EFAULT;
2876
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002877 hdev = hci_dev_get(di.dev_id);
2878 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879 return -ENODEV;
2880
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002881 /* When auto-off is configured the transport is
2882 * running, but the device should still be
2883 * reported as down.
2884 */
2885 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2886 flags = hdev->flags & ~BIT(HCI_UP);
2887 else
2888 flags = hdev->flags;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002889
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 strcpy(di.name, hdev->name);
2891 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002892 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002893 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002895 if (lmp_bredr_capable(hdev)) {
2896 di.acl_mtu = hdev->acl_mtu;
2897 di.acl_pkts = hdev->acl_pkts;
2898 di.sco_mtu = hdev->sco_mtu;
2899 di.sco_pkts = hdev->sco_pkts;
2900 } else {
2901 di.acl_mtu = hdev->le_mtu;
2902 di.acl_pkts = hdev->le_pkts;
2903 di.sco_mtu = 0;
2904 di.sco_pkts = 0;
2905 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 di.link_policy = hdev->link_policy;
2907 di.link_mode = hdev->link_mode;
2908
2909 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2910 memcpy(&di.features, &hdev->features, sizeof(di.features));
2911
2912 if (copy_to_user(arg, &di, sizeof(di)))
2913 err = -EFAULT;
2914
2915 hci_dev_put(hdev);
2916
2917 return err;
2918}
2919
2920/* ---- Interface to HCI drivers ---- */
2921
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002922static int hci_rfkill_set_block(void *data, bool blocked)
2923{
2924 struct hci_dev *hdev = data;
2925
2926 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2927
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002928 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2929 return -EBUSY;
2930
Johan Hedberg5e130362013-09-13 08:58:17 +03002931 if (blocked) {
2932 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002933 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2934 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002935 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002936 } else {
2937 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002938 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002939
2940 return 0;
2941}
2942
2943static const struct rfkill_ops hci_rfkill_ops = {
2944 .set_block = hci_rfkill_set_block,
2945};
2946
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002947static void hci_power_on(struct work_struct *work)
2948{
2949 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002950 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002951
2952 BT_DBG("%s", hdev->name);
2953
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002954 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002955 if (err < 0) {
2956 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002957 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002958 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002959
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002960 /* During the HCI setup phase, a few error conditions are
2961 * ignored and they need to be checked now. If they are still
2962 * valid, it is important to turn the device back off.
2963 */
2964 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002965 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002966 (hdev->dev_type == HCI_BREDR &&
2967 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2968 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002969 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2970 hci_dev_do_close(hdev);
2971 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002972 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2973 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002974 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002975
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002976 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002977 /* For unconfigured devices, set the HCI_RAW flag
2978 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002979 */
2980 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2981 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002982
2983 /* For fully configured devices, this will send
2984 * the Index Added event. For unconfigured devices,
2985 * it will send the Unconfigured Index Added event.
2986 *
2987 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2988 * and no event will be sent.
2989 */
2990 mgmt_index_added(hdev);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002991 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002992 /* Now that the controller is configured, it is
2993 * important to clear the HCI_RAW flag.
2994 */
2995 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2996 clear_bit(HCI_RAW, &hdev->flags);
2997
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002998 /* Powering on the controller with HCI_CONFIG set only
2999 * happens with the transition from unconfigured to
3000 * configured. This will send the Index Added event.
3001 */
3002 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003003 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003004}
3005
3006static void hci_power_off(struct work_struct *work)
3007{
Johan Hedberg32435532011-11-07 22:16:04 +02003008 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003009 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003010
3011 BT_DBG("%s", hdev->name);
3012
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003013 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003014}
3015
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003016static void hci_discov_off(struct work_struct *work)
3017{
3018 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003019
3020 hdev = container_of(work, struct hci_dev, discov_off.work);
3021
3022 BT_DBG("%s", hdev->name);
3023
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003024 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003025}
3026
Johan Hedberg35f74982014-02-18 17:14:32 +02003027void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003028{
Johan Hedberg48210022013-01-27 00:31:28 +02003029 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003030
Johan Hedberg48210022013-01-27 00:31:28 +02003031 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3032 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003033 kfree(uuid);
3034 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003035}
3036
Johan Hedberg35f74982014-02-18 17:14:32 +02003037void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003038{
3039 struct list_head *p, *n;
3040
3041 list_for_each_safe(p, n, &hdev->link_keys) {
3042 struct link_key *key;
3043
3044 key = list_entry(p, struct link_key, list);
3045
3046 list_del(p);
3047 kfree(key);
3048 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003049}
3050
Johan Hedberg35f74982014-02-18 17:14:32 +02003051void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003052{
3053 struct smp_ltk *k, *tmp;
3054
3055 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3056 list_del(&k->list);
3057 kfree(k);
3058 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003059}
3060
Johan Hedberg970c4e42014-02-18 10:19:33 +02003061void hci_smp_irks_clear(struct hci_dev *hdev)
3062{
3063 struct smp_irk *k, *tmp;
3064
3065 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3066 list_del(&k->list);
3067 kfree(k);
3068 }
3069}
3070
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003071struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3072{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003073 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003074
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003075 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003076 if (bacmp(bdaddr, &k->bdaddr) == 0)
3077 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003078
3079 return NULL;
3080}
3081
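/* Decide whether a freshly created link key should be stored
 * persistently, based on the key type and on the bonding requirements
 * both sides declared during pairing.
 */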
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303082static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003083 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003084{
3085 /* Legacy key */
3086 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303087 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003088
3089 /* Debug keys are insecure so don't store them persistently */
3090 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303091 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003092
3093 /* Changed combination key and there's no previous one */
3094 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303095 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003096
3097 /* Security mode 3 case */
3098 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303099 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003100
3101 /* Neither local nor remote side had no-bonding as a requirement */
3102 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303103 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003104
3105 /* Local side had dedicated bonding as requirement */
3106 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303107 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003108
3109 /* Remote side had dedicated bonding as requirement */
3110 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303111 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003112
3113 /* If none of the above criteria match, then don't store the key
3114 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303115 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003116}
3117
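/* Only keys of type SMP_LTK match lookups for the master role; all
 * other LTK types are treated as slave-role keys by the helpers below.
 */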
Johan Hedberg98a0b842014-01-30 19:40:00 -08003118static bool ltk_type_master(u8 type)
3119{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03003120 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08003121}
3122
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003123struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003124 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003125{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003126 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003127
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003128 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003129 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003130 continue;
3131
Johan Hedberg98a0b842014-01-30 19:40:00 -08003132 if (ltk_type_master(k->type) != master)
3133 continue;
3134
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003135 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003136 }
3137
3138 return NULL;
3139}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003140
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003141struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003142 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003143{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003144 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003145
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003146 list_for_each_entry(k, &hdev->long_term_keys, list)
3147 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003148 bacmp(bdaddr, &k->bdaddr) == 0 &&
3149 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003150 return k;
3151
3152 return NULL;
3153}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003154
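/* Resolve a Resolvable Private Address to a stored IRK. The first pass
 * matches RPAs that have been resolved before; the second pass runs the
 * cryptographic check against every IRK and caches a hit in irk->rpa so
 * that the next lookup can take the fast path.
 */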
Johan Hedberg970c4e42014-02-18 10:19:33 +02003155struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3156{
3157 struct smp_irk *irk;
3158
3159 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3160 if (!bacmp(&irk->rpa, rpa))
3161 return irk;
3162 }
3163
3164 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3165 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3166 bacpy(&irk->rpa, rpa);
3167 return irk;
3168 }
3169 }
3170
3171 return NULL;
3172}
3173
3174struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3175 u8 addr_type)
3176{
3177 struct smp_irk *irk;
3178
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003179 /* Identity Address must be public or static random */
3180 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3181 return NULL;
3182
Johan Hedberg970c4e42014-02-18 10:19:33 +02003183 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3184 if (addr_type == irk->addr_type &&
3185 bacmp(bdaddr, &irk->bdaddr) == 0)
3186 return irk;
3187 }
3188
3189 return NULL;
3190}
3191
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003192struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003193 bdaddr_t *bdaddr, u8 *val, u8 type,
3194 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003195{
3196 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303197 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003198
3199 old_key = hci_find_link_key(hdev, bdaddr);
3200 if (old_key) {
3201 old_key_type = old_key->type;
3202 key = old_key;
3203 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003204 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003205 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003206 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003207 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003208 list_add(&key->list, &hdev->link_keys);
3209 }
3210
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003211 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003212
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003213 /* Some buggy controller combinations generate a changed
3214 * combination key for legacy pairing even when there's no
3215 * previous key */
3216 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003217 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003218 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003219 if (conn)
3220 conn->key_type = type;
3221 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003222
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003223 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003224 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003225 key->pin_len = pin_len;
3226
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003227 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003228 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003229 else
3230 key->type = type;
3231
Johan Hedberg7652ff62014-06-24 13:15:49 +03003232 if (persistent)
3233 *persistent = hci_persistent_key(hdev, conn, type,
3234 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003235
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003236 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003237}
3238
Johan Hedbergca9142b2014-02-19 14:57:44 +02003239struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003240 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003241 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003242{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003243 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003244 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003245
Johan Hedberg98a0b842014-01-30 19:40:00 -08003246 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003247 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003248 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003249 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003250 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003251 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003252 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003253 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003254 }
3255
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003256 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003257 key->bdaddr_type = addr_type;
3258 memcpy(key->val, tk, sizeof(key->val));
3259 key->authenticated = authenticated;
3260 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003261 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003262 key->enc_size = enc_size;
3263 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003264
Johan Hedbergca9142b2014-02-19 14:57:44 +02003265 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003266}
3267
Johan Hedbergca9142b2014-02-19 14:57:44 +02003268struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3269 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003270{
3271 struct smp_irk *irk;
3272
3273 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3274 if (!irk) {
3275 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3276 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003277 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003278
3279 bacpy(&irk->bdaddr, bdaddr);
3280 irk->addr_type = addr_type;
3281
3282 list_add(&irk->list, &hdev->identity_resolving_keys);
3283 }
3284
3285 memcpy(irk->val, val, 16);
3286 bacpy(&irk->rpa, rpa);
3287
Johan Hedbergca9142b2014-02-19 14:57:44 +02003288 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003289}
3290
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003291int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3292{
3293 struct link_key *key;
3294
3295 key = hci_find_link_key(hdev, bdaddr);
3296 if (!key)
3297 return -ENOENT;
3298
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003299 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003300
3301 list_del(&key->list);
3302 kfree(key);
3303
3304 return 0;
3305}
3306
Johan Hedberge0b2b272014-02-18 17:14:31 +02003307int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003308{
3309 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003310 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003311
3312 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003313 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003314 continue;
3315
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003316 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003317
3318 list_del(&k->list);
3319 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003320 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003321 }
3322
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003323 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003324}
3325
Johan Hedberga7ec7332014-02-18 17:14:35 +02003326void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3327{
3328 struct smp_irk *k, *tmp;
3329
Johan Hedberg668b7b12014-02-21 16:03:31 +02003330 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003331 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3332 continue;
3333
3334 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3335
3336 list_del(&k->list);
3337 kfree(k);
3338 }
3339}
3340
Ville Tervo6bd32322011-02-16 16:32:41 +02003341/* HCI command timer function. Fires when the controller has not
 * acknowledged a queued command in time; the command credit is reset to
 * one so that the next command in the queue can still be sent.
 */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003342static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003343{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003344 struct hci_dev *hdev = container_of(work, struct hci_dev,
3345 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003346
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003347 if (hdev->sent_cmd) {
3348 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3349 u16 opcode = __le16_to_cpu(sent->opcode);
3350
3351 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3352 } else {
3353 BT_ERR("%s command tx timeout", hdev->name);
3354 }
3355
Ville Tervo6bd32322011-02-16 16:32:41 +02003356 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003357 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003358}
3359
Szymon Janc2763eda2011-03-22 13:12:22 +01003360struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003361 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003362{
3363 struct oob_data *data;
3364
3365 list_for_each_entry(data, &hdev->remote_oob_data, list)
3366 if (bacmp(bdaddr, &data->bdaddr) == 0)
3367 return data;
3368
3369 return NULL;
3370}
3371
3372int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3373{
3374 struct oob_data *data;
3375
3376 data = hci_find_remote_oob_data(hdev, bdaddr);
3377 if (!data)
3378 return -ENOENT;
3379
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003380 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003381
3382 list_del(&data->list);
3383 kfree(data);
3384
3385 return 0;
3386}
3387
Johan Hedberg35f74982014-02-18 17:14:32 +02003388void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003389{
3390 struct oob_data *data, *n;
3391
3392 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3393 list_del(&data->list);
3394 kfree(data);
3395 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003396}
3397
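/* Store legacy (P-192) out-of-band pairing data for a remote device.
 * Any P-256 values from an earlier Secure Connections exchange are
 * cleared, since this variant only carries the 192-bit pair.
 */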
Marcel Holtmann07988722014-01-10 02:07:29 -08003398int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3399 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003400{
3401 struct oob_data *data;
3402
3403 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003404 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003405 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003406 if (!data)
3407 return -ENOMEM;
3408
3409 bacpy(&data->bdaddr, bdaddr);
3410 list_add(&data->list, &hdev->remote_oob_data);
3411 }
3412
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003413 memcpy(data->hash192, hash, sizeof(data->hash192));
3414 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003415
Marcel Holtmann07988722014-01-10 02:07:29 -08003416 memset(data->hash256, 0, sizeof(data->hash256));
3417 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3418
3419 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3420
3421 return 0;
3422}
3423
3424int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3425 u8 *hash192, u8 *randomizer192,
3426 u8 *hash256, u8 *randomizer256)
3427{
3428 struct oob_data *data;
3429
3430 data = hci_find_remote_oob_data(hdev, bdaddr);
3431 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003432 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003433 if (!data)
3434 return -ENOMEM;
3435
3436 bacpy(&data->bdaddr, bdaddr);
3437 list_add(&data->list, &hdev->remote_oob_data);
3438 }
3439
3440 memcpy(data->hash192, hash192, sizeof(data->hash192));
3441 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3442
3443 memcpy(data->hash256, hash256, sizeof(data->hash256));
3444 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3445
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003446 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003447
3448 return 0;
3449}
3450
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003451struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003452 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003453{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003454 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003455
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003456 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003457 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003458 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003459 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003460
3461 return NULL;
3462}
3463
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003464void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003465{
3466 struct list_head *p, *n;
3467
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003468 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003469 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003470
3471 list_del(p);
3472 kfree(b);
3473 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003474}
3475
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003476int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003477{
3478 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003479
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003480 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003481 return -EBADF;
3482
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003483 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003484 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003485
3486 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003487 if (!entry)
3488 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003489
3490 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003491 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003492
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003493 list_add(&entry->list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003494
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003495 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003496}
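
/* Illustrative call (hypothetical values), e.g. for letting a BR/EDR
 * device through the connection whitelist:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err < 0 && err != -EEXIST)
 *		return err;
 */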
3497
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003498int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003499{
3500 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003501
Johan Hedberg35f74982014-02-18 17:14:32 +02003502 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003503 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003504 return 0;
3505 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003506
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003507 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003508 if (!entry)
3509 return -ENOENT;
3510
3511 list_del(&entry->list);
3512 kfree(entry);
3513
3514 return 0;
3515}
3516
Andre Guedes15819a72014-02-03 13:56:18 -03003517/* This function requires the caller holds hdev->lock */
3518struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3519 bdaddr_t *addr, u8 addr_type)
3520{
3521 struct hci_conn_params *params;
3522
Johan Hedberg738f6182014-07-03 19:33:51 +03003523 /* The conn params list only contains identity addresses */
3524 if (!hci_is_identity_address(addr, addr_type))
3525 return NULL;
3526
Andre Guedes15819a72014-02-03 13:56:18 -03003527 list_for_each_entry(params, &hdev->le_conn_params, list) {
3528 if (bacmp(&params->addr, addr) == 0 &&
3529 params->addr_type == addr_type) {
3530 return params;
3531 }
3532 }
3533
3534 return NULL;
3535}
3536
Andre Guedescef952c2014-02-26 20:21:49 -03003537static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3538{
3539 struct hci_conn *conn;
3540
3541 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3542 if (!conn)
3543 return false;
3544
3545 if (conn->dst_type != type)
3546 return false;
3547
3548 if (conn->state != BT_CONNECTED)
3549 return false;
3550
3551 return true;
3552}
3553
Andre Guedes15819a72014-02-03 13:56:18 -03003554/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003555struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3556 bdaddr_t *addr, u8 addr_type)
Marcel Holtmann4b109662014-06-29 13:41:49 +02003557{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003558 struct hci_conn_params *param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003559
Johan Hedberg738f6182014-07-03 19:33:51 +03003560 /* The list only contains identity addresses */
3561 if (!hci_is_identity_address(addr, addr_type))
3562 return NULL;
3563
Johan Hedberg501f8822014-07-04 12:37:26 +03003564 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003565 if (bacmp(&param->addr, addr) == 0 &&
3566 param->addr_type == addr_type)
3567 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003568 }
3569
3570 return NULL;
3571}
3572
3573/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003574struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3575 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003576{
3577 struct hci_conn_params *params;
3578
Johan Hedbergc46245b2014-07-02 17:37:33 +03003579 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003580 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003581
3582 params = hci_conn_params_lookup(hdev, addr, addr_type);
3583 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003584 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003585
3586 params = kzalloc(sizeof(*params), GFP_KERNEL);
3587 if (!params) {
3588 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003589 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003590 }
3591
3592 bacpy(&params->addr, addr);
3593 params->addr_type = addr_type;
3594
3595 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003596 INIT_LIST_HEAD(&params->action);
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003597
3598 params->conn_min_interval = hdev->le_conn_min_interval;
3599 params->conn_max_interval = hdev->le_conn_max_interval;
3600 params->conn_latency = hdev->le_conn_latency;
3601 params->supervision_timeout = hdev->le_supv_timeout;
3602 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3603
3604 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3605
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003606 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003607}
3608
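/* Set the auto-connect policy for an address, creating the connection
 * parameters entry if needed and moving it between the pend_le_reports
 * and pend_le_conns action lists to match the new policy.
 */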
3609/* This function requires the caller holds hdev->lock */
3610int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003611 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003612{
3613 struct hci_conn_params *params;
3614
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003615 params = hci_conn_params_add(hdev, addr, addr_type);
3616 if (!params)
3617 return -EIO;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003618
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003619 if (params->auto_connect == auto_connect)
3620 return 0;
3621
Johan Hedberg95305ba2014-07-04 12:37:21 +03003622 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003623
Andre Guedescef952c2014-02-26 20:21:49 -03003624 switch (auto_connect) {
3625 case HCI_AUTO_CONN_DISABLED:
3626 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003627 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003628 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003629 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003630 list_add(&params->action, &hdev->pend_le_reports);
3631 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003632 break;
Andre Guedescef952c2014-02-26 20:21:49 -03003633 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003634 if (!is_connected(hdev, addr, addr_type)) {
3635 list_add(&params->action, &hdev->pend_le_conns);
3636 hci_update_background_scan(hdev);
3637 }
Andre Guedescef952c2014-02-26 20:21:49 -03003638 break;
3639 }
Andre Guedes15819a72014-02-03 13:56:18 -03003640
Johan Hedberg851efca2014-07-02 22:42:00 +03003641 params->auto_connect = auto_connect;
3642
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003643 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3644 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003645
3646 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003647}
3648
3649/* This function requires the caller holds hdev->lock */
3650void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3651{
3652 struct hci_conn_params *params;
3653
3654 params = hci_conn_params_lookup(hdev, addr, addr_type);
3655 if (!params)
3656 return;
3657
Johan Hedberg95305ba2014-07-04 12:37:21 +03003658 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003659 list_del(&params->list);
3660 kfree(params);
3661
Johan Hedberg95305ba2014-07-04 12:37:21 +03003662 hci_update_background_scan(hdev);
3663
Andre Guedes15819a72014-02-03 13:56:18 -03003664 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3665}
3666
3667/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003668void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3669{
3670 struct hci_conn_params *params, *tmp;
3671
3672 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3673 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3674 continue;
3675 list_del(&params->list);
3676 kfree(params);
3677 }
3678
3679 BT_DBG("All LE disabled connection parameters were removed");
3680}
3681
3682/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003683void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003684{
3685 struct hci_conn_params *params, *tmp;
3686
3687 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberga2f41a82014-07-04 12:37:19 +03003688 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003689 list_del(&params->list);
3690 kfree(params);
3691 }
3692
Johan Hedberga2f41a82014-07-04 12:37:19 +03003693 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003694
Andre Guedes15819a72014-02-03 13:56:18 -03003695 BT_DBG("All LE connection parameters were removed");
3696}
3697
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003698static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003699{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003700 if (status) {
3701 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003702
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003703 hci_dev_lock(hdev);
3704 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3705 hci_dev_unlock(hdev);
3706 return;
3707 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003708}
3709
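/* Completion handler for the LE scan disable request. For LE-only
 * discovery this ends the discovery session; for interleaved discovery
 * it starts the BR/EDR inquiry phase using the general inquiry access
 * code.
 */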
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003710static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003711{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003712 /* General inquiry access code (GIAC) */
3713 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3714 struct hci_request req;
3715 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003716 int err;
3717
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003718 if (status) {
3719 BT_ERR("Failed to disable LE scanning: status %d", status);
3720 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003721 }
3722
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003723 switch (hdev->discovery.type) {
3724 case DISCOV_TYPE_LE:
3725 hci_dev_lock(hdev);
3726 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3727 hci_dev_unlock(hdev);
3728 break;
3729
3730 case DISCOV_TYPE_INTERLEAVED:
3731 hci_req_init(&req, hdev);
3732
3733 memset(&cp, 0, sizeof(cp));
3734 memcpy(&cp.lap, lap, sizeof(cp.lap));
3735 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3736 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3737
3738 hci_dev_lock(hdev);
3739
3740 hci_inquiry_cache_flush(hdev);
3741
3742 err = hci_req_run(&req, inquiry_complete);
3743 if (err) {
3744 BT_ERR("Inquiry request failed: err %d", err);
3745 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3746 }
3747
3748 hci_dev_unlock(hdev);
3749 break;
3750 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003751}
3752
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003753static void le_scan_disable_work(struct work_struct *work)
3754{
3755 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003756 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003757 struct hci_request req;
3758 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003759
3760 BT_DBG("%s", hdev->name);
3761
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003762 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003763
Andre Guedesb1efcc22014-02-26 20:21:40 -03003764 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003765
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003766 err = hci_req_run(&req, le_scan_disable_work_complete);
3767 if (err)
3768 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003769}
3770
Johan Hedberg8d972502014-02-28 12:54:14 +02003771static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3772{
3773 struct hci_dev *hdev = req->hdev;
3774
3775 /* If we're advertising or initiating an LE connection we can't
3776 * go ahead and change the random address at this time. This is
3777 * because the eventual initiator address used for the
3778 * subsequently created connection will be undefined (some
3779 * controllers use the new address and others the one we had
3780 * when the operation started).
3781 *
3782 * In this kind of scenario skip the update and let the random
3783 * address be updated at the next cycle.
3784 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003785 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003786 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3787 BT_DBG("Deferring random address update");
3788 return;
3789 }
3790
3791 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3792}
3793
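/* Pick the own-address type for an outgoing LE request. A typical
 * caller pattern looks roughly like this (illustrative sketch only):
 *
 *	u8 own_addr_type;
 *
 *	err = hci_update_random_address(req, false, &own_addr_type);
 *	if (err < 0)
 *		return err;
 *	cp.own_address_type = own_addr_type;
 */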
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003794int hci_update_random_address(struct hci_request *req, bool require_privacy,
3795 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003796{
3797 struct hci_dev *hdev = req->hdev;
3798 int err;
3799
3800 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003801 * the current RPA has expired, or something other than
3802 * the current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003803 */
3804 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003805 int to;
3806
3807 *own_addr_type = ADDR_LE_DEV_RANDOM;
3808
3809 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003810 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003811 return 0;
3812
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003813 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003814 if (err < 0) {
3815 BT_ERR("%s failed to generate new RPA", hdev->name);
3816 return err;
3817 }
3818
Johan Hedberg8d972502014-02-28 12:54:14 +02003819 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003820
3821 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3822 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3823
3824 return 0;
3825 }
3826
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003827 /* If privacy is required but no resolvable private address is available,
3828 * use an unresolvable private address. This is useful for active
3829 * scanning and non-connectable advertising.
3830 */
3831 if (require_privacy) {
3832 bdaddr_t urpa;
3833
3834 get_random_bytes(&urpa, 6);
3835 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3836
3837 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003838 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003839 return 0;
3840 }
3841
Johan Hedbergebd3a742014-02-23 19:42:21 +02003842 /* If forcing static address is in use or there is no public
3843 * address use the static address as random address (but skip
3844 * the HCI command if the current random address is already the
3845 * static one).
3846 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003847 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003848 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3849 *own_addr_type = ADDR_LE_DEV_RANDOM;
3850 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3851 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3852 &hdev->static_addr);
3853 return 0;
3854 }
3855
3856 /* Neither privacy nor static address is being used so use a
3857 * public address.
3858 */
3859 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3860
3861 return 0;
3862}
3863
Johan Hedberga1f4c312014-02-27 14:05:41 +02003864/* Copy the Identity Address of the controller.
3865 *
3866 * If the controller has a public BD_ADDR, then by default use that one.
3867 * If this is a LE only controller without a public address, default to
3868 * the static random address.
3869 *
3870 * For debugging purposes it is possible to force controllers with a
3871 * public address to use the static random address instead.
3872 */
3873void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3874 u8 *bdaddr_type)
3875{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003876 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003877 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3878 bacpy(bdaddr, &hdev->static_addr);
3879 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3880 } else {
3881 bacpy(bdaddr, &hdev->bdaddr);
3882 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3883 }
3884}
3885
David Herrmann9be0dab2012-04-22 14:39:57 +02003886/* Alloc HCI device */
3887struct hci_dev *hci_alloc_dev(void)
3888{
3889 struct hci_dev *hdev;
3890
3891 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3892 if (!hdev)
3893 return NULL;
3894
David Herrmannb1b813d2012-04-22 14:39:58 +02003895 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3896 hdev->esco_type = (ESCO_HV1);
3897 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003898 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3899 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003900 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003901 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3902 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003903
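	/* Sniff intervals are in baseband slots of 0.625 ms, so the
	 * defaults below correspond to 500 ms and 50 ms.
	 */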
David Herrmannb1b813d2012-04-22 14:39:58 +02003904 hdev->sniff_max_interval = 800;
3905 hdev->sniff_min_interval = 80;
3906
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003907 hdev->le_adv_channel_map = 0x07;
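	/* LE scan values are in 0.625 ms units (0x0060 = 60 ms interval,
	 * 0x0030 = 30 ms window); the connection intervals below are in
	 * 1.25 ms units and the supervision timeout in 10 ms units.
	 */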
Marcel Holtmannbef64732013-10-11 08:23:19 -07003908 hdev->le_scan_interval = 0x0060;
3909 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003910 hdev->le_conn_min_interval = 0x0028;
3911 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003912 hdev->le_conn_latency = 0x0000;
3913 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003914
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003915 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003916 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003917 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3918 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003919
David Herrmannb1b813d2012-04-22 14:39:58 +02003920 mutex_init(&hdev->lock);
3921 mutex_init(&hdev->req_lock);
3922
3923 INIT_LIST_HEAD(&hdev->mgmt_pending);
3924 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003925 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003926 INIT_LIST_HEAD(&hdev->uuids);
3927 INIT_LIST_HEAD(&hdev->link_keys);
3928 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003929 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003930 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003931 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003932 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003933 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003934 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003935 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003936
3937 INIT_WORK(&hdev->rx_work, hci_rx_work);
3938 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3939 INIT_WORK(&hdev->tx_work, hci_tx_work);
3940 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003941
David Herrmannb1b813d2012-04-22 14:39:58 +02003942 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3943 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3944 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3945
David Herrmannb1b813d2012-04-22 14:39:58 +02003946 skb_queue_head_init(&hdev->rx_q);
3947 skb_queue_head_init(&hdev->cmd_q);
3948 skb_queue_head_init(&hdev->raw_q);
3949
3950 init_waitqueue_head(&hdev->req_wait_q);
3951
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003952 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003953
David Herrmannb1b813d2012-04-22 14:39:58 +02003954 hci_init_sysfs(hdev);
3955 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003956
3957 return hdev;
3958}
3959EXPORT_SYMBOL(hci_alloc_dev);
3960
3961/* Free HCI device */
3962void hci_free_dev(struct hci_dev *hdev)
3963{
David Herrmann9be0dab2012-04-22 14:39:57 +02003964 /* will free via device release */
3965 put_device(&hdev->dev);
3966}
3967EXPORT_SYMBOL(hci_free_dev);
3968
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969/* Register HCI device */
3970int hci_register_dev(struct hci_dev *hdev)
3971{
David Herrmannb1b813d2012-04-22 14:39:58 +02003972 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973
Marcel Holtmann74292d52014-07-06 15:50:27 +02003974 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975 return -EINVAL;
3976
Mat Martineau08add512011-11-02 16:18:36 -07003977 /* Do not allow HCI_AMP devices to register at index 0,
3978 * so the index can be used as the AMP controller ID.
3979 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003980 switch (hdev->dev_type) {
3981 case HCI_BREDR:
3982 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3983 break;
3984 case HCI_AMP:
3985 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3986 break;
3987 default:
3988 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003990
Sasha Levin3df92b32012-05-27 22:36:56 +02003991 if (id < 0)
3992 return id;
3993
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994 sprintf(hdev->name, "hci%d", id);
3995 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003996
3997 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3998
Kees Cookd8537542013-07-03 15:04:57 -07003999 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4000 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004001 if (!hdev->workqueue) {
4002 error = -ENOMEM;
4003 goto err;
4004 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004005
Kees Cookd8537542013-07-03 15:04:57 -07004006 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4007 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004008 if (!hdev->req_workqueue) {
4009 destroy_workqueue(hdev->workqueue);
4010 error = -ENOMEM;
4011 goto err;
4012 }
4013
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004014 if (!IS_ERR_OR_NULL(bt_debugfs))
4015 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4016
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004017 dev_set_name(&hdev->dev, "%s", hdev->name);
4018
Johan Hedberg99780a72014-02-18 10:40:07 +02004019 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4020 CRYPTO_ALG_ASYNC);
4021 if (IS_ERR(hdev->tfm_aes)) {
4022 BT_ERR("Unable to create crypto context");
4023 error = PTR_ERR(hdev->tfm_aes);
4024 hdev->tfm_aes = NULL;
4025 goto err_wqueue;
4026 }
4027
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004028 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004029 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02004030 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004032 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004033 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4034 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004035 if (hdev->rfkill) {
4036 if (rfkill_register(hdev->rfkill) < 0) {
4037 rfkill_destroy(hdev->rfkill);
4038 hdev->rfkill = NULL;
4039 }
4040 }
4041
Johan Hedberg5e130362013-09-13 08:58:17 +03004042 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4043 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4044
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004045 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004046 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004047
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004048 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004049 /* Assume BR/EDR support until proven otherwise (such as
4050 * through reading supported features during init.
4051 */
4052 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4053 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004054
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004055 write_lock(&hci_dev_list_lock);
4056 list_add(&hdev->list, &hci_dev_list);
4057 write_unlock(&hci_dev_list_lock);
4058
Marcel Holtmann4a964402014-07-02 19:10:33 +02004059 /* Devices that are marked for raw-only usage are unconfigured
4060 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004061 */
4062 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004063 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004064
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004066 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067
Johan Hedberg19202572013-01-14 22:33:51 +02004068 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004069
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004071
Johan Hedberg99780a72014-02-18 10:40:07 +02004072err_tfm:
4073 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02004074err_wqueue:
4075 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004076 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004077err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004078 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004079
David Herrmann33ca9542011-10-08 14:58:49 +02004080 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081}
4082EXPORT_SYMBOL(hci_register_dev);
4083
4084/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004085void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086{
Sasha Levin3df92b32012-05-27 22:36:56 +02004087 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004088
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004089 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090
Johan Hovold94324962012-03-15 14:48:41 +01004091 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4092
Sasha Levin3df92b32012-05-27 22:36:56 +02004093 id = hdev->id;
4094
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004095 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004097 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098
4099 hci_dev_do_close(hdev);
4100
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304101 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004102 kfree_skb(hdev->reassembly[i]);
4103
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004104 cancel_work_sync(&hdev->power_on);
4105
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004106 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02004107 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4108 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004109 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004110 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004111 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004112 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004113
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004114 /* mgmt_index_removed should take care of emptying the
4115 * pending list
	 */
4116 BUG_ON(!list_empty(&hdev->mgmt_pending));
4117
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118 hci_notify(hdev, HCI_DEV_UNREG);
4119
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004120 if (hdev->rfkill) {
4121 rfkill_unregister(hdev->rfkill);
4122 rfkill_destroy(hdev->rfkill);
4123 }
4124
Johan Hedberg99780a72014-02-18 10:40:07 +02004125 if (hdev->tfm_aes)
4126 crypto_free_blkcipher(hdev->tfm_aes);
4127
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004128 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004129
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004130 debugfs_remove_recursive(hdev->debugfs);
4131
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004132 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004133 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004134
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004135 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004136 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004137 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004138 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004139 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004140 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004141 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004142 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004143 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004144 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004145 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004146
David Herrmanndc946bd2012-01-07 15:47:24 +01004147 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004148
4149 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150}
4151EXPORT_SYMBOL(hci_unregister_dev);
4152
4153/* Suspend HCI device */
4154int hci_suspend_dev(struct hci_dev *hdev)
4155{
4156 hci_notify(hdev, HCI_DEV_SUSPEND);
4157 return 0;
4158}
4159EXPORT_SYMBOL(hci_suspend_dev);
4160
4161/* Resume HCI device */
4162int hci_resume_dev(struct hci_dev *hdev)
4163{
4164 hci_notify(hdev, HCI_DEV_RESUME);
4165 return 0;
4166}
4167EXPORT_SYMBOL(hci_resume_dev);
4168
Marcel Holtmann76bca882009-11-18 00:40:39 +01004169/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004170int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004171{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004172 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004173 !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004174 kfree_skb(skb);
4175 return -ENXIO;
4176 }
4177
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004178 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004179 bt_cb(skb)->incoming = 1;
4180
4181 /* Time stamp */
4182 __net_timestamp(skb);
4183
Marcel Holtmann76bca882009-11-18 00:40:39 +01004184 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004185 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004186
Marcel Holtmann76bca882009-11-18 00:40:39 +01004187 return 0;
4188}
4189EXPORT_SYMBOL(hci_recv_frame);
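
/* Illustrative sketch (an assumption, not part of the original file): a
 * minimal transport-driver receive path. The driver allocates the skb,
 * tags the packet type that arrived on the wire and hands the buffer to
 * the core, which owns (and frees) it from here on. The function name
 * and the hardcoded HCI_EVENT_PKT type are made up for the example.
 */
static int __maybe_unused example_driver_recv(struct hci_dev *hdev,
					      const void *buf, size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, len), buf, len);

	/* On error hci_recv_frame() frees the skb itself */
	return hci_recv_frame(hdev, skb);
}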
4190
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304191static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004192 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304193{
4194 int len = 0;
4195 int hlen = 0;
4196 int remain = count;
4197 struct sk_buff *skb;
4198 struct bt_skb_cb *scb;
4199
4200 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004201 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304202 return -EILSEQ;
4203
4204 skb = hdev->reassembly[index];
4205
4206 if (!skb) {
4207 switch (type) {
4208 case HCI_ACLDATA_PKT:
4209 len = HCI_MAX_FRAME_SIZE;
4210 hlen = HCI_ACL_HDR_SIZE;
4211 break;
4212 case HCI_EVENT_PKT:
4213 len = HCI_MAX_EVENT_SIZE;
4214 hlen = HCI_EVENT_HDR_SIZE;
4215 break;
4216 case HCI_SCODATA_PKT:
4217 len = HCI_MAX_SCO_SIZE;
4218 hlen = HCI_SCO_HDR_SIZE;
4219 break;
4220 }
4221
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004222 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304223 if (!skb)
4224 return -ENOMEM;
4225
4226 scb = (void *) skb->cb;
4227 scb->expect = hlen;
4228 scb->pkt_type = type;
4229
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304230 hdev->reassembly[index] = skb;
4231 }
4232
4233 while (count) {
4234 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004235 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304236
4237 memcpy(skb_put(skb, len), data, len);
4238
4239 count -= len;
4240 data += len;
4241 scb->expect -= len;
4242 remain = count;
4243
4244 switch (type) {
4245 case HCI_EVENT_PKT:
4246 if (skb->len == HCI_EVENT_HDR_SIZE) {
4247 struct hci_event_hdr *h = hci_event_hdr(skb);
4248 scb->expect = h->plen;
4249
4250 if (skb_tailroom(skb) < scb->expect) {
4251 kfree_skb(skb);
4252 hdev->reassembly[index] = NULL;
4253 return -ENOMEM;
4254 }
4255 }
4256 break;
4257
4258 case HCI_ACLDATA_PKT:
4259 if (skb->len == HCI_ACL_HDR_SIZE) {
4260 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4261 scb->expect = __le16_to_cpu(h->dlen);
4262
4263 if (skb_tailroom(skb) < scb->expect) {
4264 kfree_skb(skb);
4265 hdev->reassembly[index] = NULL;
4266 return -ENOMEM;
4267 }
4268 }
4269 break;
4270
4271 case HCI_SCODATA_PKT:
4272 if (skb->len == HCI_SCO_HDR_SIZE) {
4273 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4274 scb->expect = h->dlen;
4275
4276 if (skb_tailroom(skb) < scb->expect) {
4277 kfree_skb(skb);
4278 hdev->reassembly[index] = NULL;
4279 return -ENOMEM;
4280 }
4281 }
4282 break;
4283 }
4284
4285 if (scb->expect == 0) {
4286 /* Complete frame */
4287
4288 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004289 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304290
4291 hdev->reassembly[index] = NULL;
4292 return remain;
4293 }
4294 }
4295
4296 return remain;
4297}
4298
Marcel Holtmannef222012007-07-11 06:42:04 +02004299int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4300{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304301 int rem = 0;
4302
Marcel Holtmannef222012007-07-11 06:42:04 +02004303 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4304 return -EILSEQ;
4305
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004306 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004307 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304308 if (rem < 0)
4309 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004310
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304311 data += (count - rem);
4312 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004313 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004314
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304315 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004316}
4317EXPORT_SYMBOL(hci_recv_fragment);
4318
Suraj Sumangala99811512010-07-14 13:02:19 +05304319#define STREAM_REASSEMBLY 0
4320
4321int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4322{
4323 int type;
4324 int rem = 0;
4325
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004326 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304327 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4328
4329 if (!skb) {
4330 struct { char type; } *pkt;
4331
4332 /* Start of the frame */
4333 pkt = data;
4334 type = pkt->type;
4335
4336 data++;
4337 count--;
4338 } else {
Suraj Sumangala99811512010-07-14 13:02:19 +05304339 type = bt_cb(skb)->pkt_type;
 }
4340
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004341 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004342 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304343 if (rem < 0)
4344 return rem;
4345
4346 data += (count - rem);
4347 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004348 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304349
4350 return rem;
4351}
4352EXPORT_SYMBOL(hci_recv_stream_fragment);
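
/* Illustrative sketch (hypothetical driver code, not from the original
 * source): an H:4 UART driver feeding raw bytes to the core. The core
 * picks the packet type indicator off the byte stream and reassembles
 * complete frames via hci_reassembly().
 */
static void __maybe_unused example_uart_rx(struct hci_dev *hdev,
					   void *data, int count)
{
	int err;

	err = hci_recv_stream_fragment(hdev, data, count);
	if (err < 0)
		BT_ERR("%s corrupted HCI byte stream (%d)", hdev->name, err);
}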
4353
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354/* ---- Interface to upper protocols ---- */
4355
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356int hci_register_cb(struct hci_cb *cb)
4357{
4358 BT_DBG("%p name %s", cb, cb->name);
4359
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004360 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004361 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004362 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363
4364 return 0;
4365}
4366EXPORT_SYMBOL(hci_register_cb);
4367
4368int hci_unregister_cb(struct hci_cb *cb)
4369{
4370 BT_DBG("%p name %s", cb, cb->name);
4371
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004372 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004374 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004375
4376 return 0;
4377}
4378EXPORT_SYMBOL(hci_unregister_cb);
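
/* Illustrative sketch (assumed names): how an upper protocol hooks into
 * the HCI core. hci_register_cb() only links the block into hci_cb_list;
 * the notification callbacks in struct hci_cb are optional, so a bare
 * .name is enough for this example.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __maybe_unused example_proto_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __maybe_unused example_proto_exit(void)
{
	hci_unregister_cb(&example_cb);
}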
4379
Marcel Holtmann51086992013-10-10 14:54:19 -07004380static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004382 int err;
4383
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004384 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004386 /* Time stamp */
4387 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004389 /* Send copy to monitor */
4390 hci_send_to_monitor(hdev, skb);
4391
4392 if (atomic_read(&hdev->promisc)) {
4393 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004394 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004395 }
4396
4397 /* Get rid of skb owner, prior to sending to the driver. */
4398 skb_orphan(skb);
4399
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004400 err = hdev->send(hdev, skb);
4401 if (err < 0) {
4402 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4403 kfree_skb(skb);
4404 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405}
4406
Johan Hedberg3119ae92013-03-05 20:37:44 +02004407void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4408{
4409 skb_queue_head_init(&req->cmd_q);
4410 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004411 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004412}
4413
4414int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4415{
4416 struct hci_dev *hdev = req->hdev;
4417 struct sk_buff *skb;
4418 unsigned long flags;
4419
4420 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4421
Andre Guedes5d73e032013-03-08 11:20:16 -03004422 /* If an error occurred during request building, remove all HCI
4423 * commands queued on the HCI request queue.
4424 */
4425 if (req->err) {
4426 skb_queue_purge(&req->cmd_q);
4427 return req->err;
4428 }
4429
Johan Hedberg3119ae92013-03-05 20:37:44 +02004430 /* Do not allow empty requests */
4431 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004432 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004433
4434 skb = skb_peek_tail(&req->cmd_q);
4435 bt_cb(skb)->req.complete = complete;
4436
4437 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4438 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4439 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4440
4441 queue_work(hdev->workqueue, &hdev->cmd_work);
4442
4443 return 0;
4444}
4445
Marcel Holtmann899de762014-07-11 05:51:58 +02004446bool hci_req_pending(struct hci_dev *hdev)
4447{
4448 return hdev->req_status == HCI_REQ_PEND;
4449}
4450
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004451static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004452 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453{
4454 int len = HCI_COMMAND_HDR_SIZE + plen;
4455 struct hci_command_hdr *hdr;
4456 struct sk_buff *skb;
4457
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004459 if (!skb)
4460 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461
4462 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004463 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004464 hdr->plen = plen;
4465
4466 if (plen)
4467 memcpy(skb_put(skb, plen), param, plen);
4468
4469 BT_DBG("skb len %d", skb->len);
4470
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004471 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004472
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004473 return skb;
4474}
4475
4476/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004477int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4478 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004479{
4480 struct sk_buff *skb;
4481
4482 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4483
4484 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4485 if (!skb) {
4486 BT_ERR("%s no memory for command", hdev->name);
4487 return -ENOMEM;
4488 }
4489
Johan Hedberg11714b32013-03-05 20:37:47 +02004490 /* Stand-alone HCI commands must be flagged as
4491 * single-command requests.
4492 */
4493 bt_cb(skb)->req.start = true;
4494
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004496 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497
4498 return 0;
4499}
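
/* Illustrative usage (an assumption for the example): queueing a plain
 * HCI_Reset. hci_send_cmd() marks it as a single-command request and
 * the cmd_work worker pushes it to the driver.
 */
static int __maybe_unused example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}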
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500
Johan Hedberg71c76a12013-03-05 20:37:46 +02004501/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004502void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4503 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004504{
4505 struct hci_dev *hdev = req->hdev;
4506 struct sk_buff *skb;
4507
4508 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4509
Andre Guedes34739c12013-03-08 11:20:18 -03004510 /* If an error occurred during request building, there is no point in
4511 * queueing the HCI command. We can simply return.
4512 */
4513 if (req->err)
4514 return;
4515
Johan Hedberg71c76a12013-03-05 20:37:46 +02004516 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4517 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004518 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4519 hdev->name, opcode);
4520 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004521 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004522 }
4523
4524 if (skb_queue_empty(&req->cmd_q))
4525 bt_cb(skb)->req.start = true;
4526
Johan Hedberg02350a72013-04-03 21:50:29 +03004527 bt_cb(skb)->req.event = event;
4528
Johan Hedberg71c76a12013-03-05 20:37:46 +02004529 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004530}
4531
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004532void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4533 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004534{
4535 hci_req_add_ev(req, opcode, plen, param, 0);
4536}
4537
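/* Illustrative sketch (function names are made up): building a small
 * asynchronous request. Commands queued with hci_req_add() are
 * submitted atomically by hci_req_run() and the completion callback
 * fires once, after the last command in the request completes.
 */
static void __maybe_unused example_req_complete(struct hci_dev *hdev,
						u8 status)
{
	BT_DBG("%s request status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return hci_req_run(&req, example_req_complete);
}
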
Linus Torvalds1da177e2005-04-16 15:20:36 -07004538/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004539void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004540{
4541 struct hci_command_hdr *hdr;
4542
4543 if (!hdev->sent_cmd)
4544 return NULL;
4545
4546 hdr = (void *) hdev->sent_cmd->data;
4547
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004548 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004549 return NULL;
4550
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004551 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552
4553 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4554}
4555
4556/* Send ACL data */
4557static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4558{
4559 struct hci_acl_hdr *hdr;
4560 int len = skb->len;
4561
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004562 skb_push(skb, HCI_ACL_HDR_SIZE);
4563 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004564 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004565 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4566 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004567}
4568
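/* Worked example (illustrative numbers): for connection handle 0x002a
 * sent with ACL_START packet-boundary flags, hci_handle_pack() keeps
 * the handle in the low 12 bits and shifts the flags above it, so
 * hdr->handle carries cpu_to_le16(0x202a).
 */
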
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004569static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004570 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004571{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004572 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573 struct hci_dev *hdev = conn->hdev;
4574 struct sk_buff *list;
4575
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004576 skb->len = skb_headlen(skb);
4577 skb->data_len = 0;
4578
4579 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004580
4581 switch (hdev->dev_type) {
4582 case HCI_BREDR:
4583 hci_add_acl_hdr(skb, conn->handle, flags);
4584 break;
4585 case HCI_AMP:
4586 hci_add_acl_hdr(skb, chan->handle, flags);
4587 break;
4588 default:
4589 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4590 return;
4591 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004592
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004593 list = skb_shinfo(skb)->frag_list;
4594 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004595 /* Non-fragmented */
4596 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4597
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004598 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599 } else {
4600 /* Fragmented */
4601 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4602
4603 skb_shinfo(skb)->frag_list = NULL;
4604
4605 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004606 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004608 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004609
4610 flags &= ~ACL_START;
4611 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612 do {
4613 skb = list;
 list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004614
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004615 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004616 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617
4618 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4619
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004620 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004621 } while (list);
4622
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004623 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004624 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004625}
4626
4627void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4628{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004629 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004630
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004631 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004632
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004633 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004634
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004635 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637
4638/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004639void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004640{
4641 struct hci_dev *hdev = conn->hdev;
4642 struct hci_sco_hdr hdr;
4643
4644 BT_DBG("%s len %d", hdev->name, skb->len);
4645
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004646 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004647 hdr.dlen = skb->len;
4648
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004649 skb_push(skb, HCI_SCO_HDR_SIZE);
4650 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004651 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004653 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004654
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004656 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658
4659/* ---- HCI TX task (outgoing data) ---- */
4660
4661/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004662static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4663 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664{
4665 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004666 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004667 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004668
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004669 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670 * added and removed with the TX task disabled.
	 */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004671
4672 rcu_read_lock();
4673
4674 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004675 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004677
4678 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4679 continue;
4680
Linus Torvalds1da177e2005-04-16 15:20:36 -07004681 num++;
4682
4683 if (c->sent < min) {
4684 min = c->sent;
4685 conn = c;
4686 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004687
4688 if (hci_conn_num(hdev, type) == num)
4689 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690 }
4691
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004692 rcu_read_unlock();
4693
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004695 int cnt, q;
4696
4697 switch (conn->type) {
4698 case ACL_LINK:
4699 cnt = hdev->acl_cnt;
4700 break;
4701 case SCO_LINK:
4702 case ESCO_LINK:
4703 cnt = hdev->sco_cnt;
4704 break;
4705 case LE_LINK:
4706 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4707 break;
4708 default:
4709 cnt = 0;
4710 BT_ERR("Unknown link type");
4711 }
4712
4713 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004714 *quote = q ? q : 1;
4715 } else {
4716 *quote = 0;
	}
4717
4718 BT_DBG("conn %p quote %d", conn, *quote);
4719 return conn;
4720}
4721
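/* Example (illustrative numbers): with hdev->acl_cnt at 8 and three
 * ACL connections holding queued data, hci_low_sent() picks the
 * connection with the smallest ->sent counter and grants it a quote
 * of 8 / 3 = 2 frames, which keeps the round-robin roughly fair.
 */
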
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004722static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723{
4724 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004725 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726
Ville Tervobae1f5d92011-02-10 22:38:53 -03004727 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004728
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004729 rcu_read_lock();
4730
Linus Torvalds1da177e2005-04-16 15:20:36 -07004731 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004732 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004733 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004734 BT_ERR("%s killing stalled connection %pMR",
4735 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004736 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737 }
4738 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004739
4740 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741}
4742
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004743static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4744 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004745{
4746 struct hci_conn_hash *h = &hdev->conn_hash;
4747 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004748 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004749 struct hci_conn *conn;
4750 int cnt, q, conn_num = 0;
4751
4752 BT_DBG("%s", hdev->name);
4753
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004754 rcu_read_lock();
4755
4756 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004757 struct hci_chan *tmp;
4758
4759 if (conn->type != type)
4760 continue;
4761
4762 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4763 continue;
4764
4765 conn_num++;
4766
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004767 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004768 struct sk_buff *skb;
4769
4770 if (skb_queue_empty(&tmp->data_q))
4771 continue;
4772
4773 skb = skb_peek(&tmp->data_q);
4774 if (skb->priority < cur_prio)
4775 continue;
4776
4777 if (skb->priority > cur_prio) {
4778 num = 0;
4779 min = ~0;
4780 cur_prio = skb->priority;
4781 }
4782
4783 num++;
4784
4785 if (conn->sent < min) {
4786 min = conn->sent;
4787 chan = tmp;
4788 }
4789 }
4790
4791 if (hci_conn_num(hdev, type) == conn_num)
4792 break;
4793 }
4794
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004795 rcu_read_unlock();
4796
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004797 if (!chan)
4798 return NULL;
4799
4800 switch (chan->conn->type) {
4801 case ACL_LINK:
4802 cnt = hdev->acl_cnt;
4803 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004804 case AMP_LINK:
4805 cnt = hdev->block_cnt;
4806 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004807 case SCO_LINK:
4808 case ESCO_LINK:
4809 cnt = hdev->sco_cnt;
4810 break;
4811 case LE_LINK:
4812 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4813 break;
4814 default:
4815 cnt = 0;
4816 BT_ERR("Unknown link type");
4817 }
4818
4819 q = cnt / num;
4820 *quote = q ? q : 1;
4821 BT_DBG("chan %p quote %d", chan, *quote);
4822 return chan;
4823}
4824
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004825static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4826{
4827 struct hci_conn_hash *h = &hdev->conn_hash;
4828 struct hci_conn *conn;
4829 int num = 0;
4830
4831 BT_DBG("%s", hdev->name);
4832
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004833 rcu_read_lock();
4834
4835 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004836 struct hci_chan *chan;
4837
4838 if (conn->type != type)
4839 continue;
4840
4841 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4842 continue;
4843
4844 num++;
4845
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004846 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004847 struct sk_buff *skb;
4848
4849 if (chan->sent) {
4850 chan->sent = 0;
4851 continue;
4852 }
4853
4854 if (skb_queue_empty(&chan->data_q))
4855 continue;
4856
4857 skb = skb_peek(&chan->data_q);
4858 if (skb->priority >= HCI_PRIO_MAX - 1)
4859 continue;
4860
4861 skb->priority = HCI_PRIO_MAX - 1;
4862
4863 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004864 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004865 }
4866
4867 if (hci_conn_num(hdev, type) == num)
4868 break;
4869 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004870
4871 rcu_read_unlock();
4872
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004873}
4874
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004875static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4876{
4877 /* Calculate count of blocks used by this packet */
4878 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4879}
4880
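/* Worked example (illustrative numbers): with hdev->block_len of 64
 * bytes, a 339 byte ACL frame carries 335 bytes after the 4 byte ACL
 * header, so __get_blocks() charges DIV_ROUND_UP(335, 64) = 6
 * controller buffer blocks for it.
 */
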
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004881static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004882{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004883 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884 /* ACL tx timeout must be longer than the maximum
4885 * link supervision timeout (40.9 seconds).
		 */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004886 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004887 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004888 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004889 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004890}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004891
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004892static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004893{
4894 unsigned int cnt = hdev->acl_cnt;
4895 struct hci_chan *chan;
4896 struct sk_buff *skb;
4897 int quote;
4898
4899 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004900
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004901 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004902 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004903 u32 priority = (skb_peek(&chan->data_q))->priority;
4904 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004905 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004906 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004907
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004908 /* Stop if priority has changed */
4909 if (skb->priority < priority)
4910 break;
4911
4912 skb = skb_dequeue(&chan->data_q);
4913
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004914 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004915 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004916
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004917 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004918 hdev->acl_last_tx = jiffies;
4919
4920 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004921 chan->sent++;
4922 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004923 }
4924 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004925
4926 if (cnt != hdev->acl_cnt)
4927 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004928}
4929
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004930static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004931{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004932 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004933 struct hci_chan *chan;
4934 struct sk_buff *skb;
4935 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004936 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004937
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004938 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004939
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004940 BT_DBG("%s", hdev->name);
4941
4942 if (hdev->dev_type == HCI_AMP)
4943 type = AMP_LINK;
4944 else
4945 type = ACL_LINK;
4946
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004947 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004948 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004949 u32 priority = (skb_peek(&chan->data_q))->priority;
4950 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4951 int blocks;
4952
4953 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004954 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004955
4956 /* Stop if priority has changed */
4957 if (skb->priority < priority)
4958 break;
4959
4960 skb = skb_dequeue(&chan->data_q);
4961
4962 blocks = __get_blocks(hdev, skb);
4963 if (blocks > hdev->block_cnt)
4964 return;
4965
4966 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004967 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004968
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004969 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004970 hdev->acl_last_tx = jiffies;
4971
4972 hdev->block_cnt -= blocks;
4973 quote -= blocks;
4974
4975 chan->sent += blocks;
4976 chan->conn->sent += blocks;
4977 }
4978 }
4979
4980 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004981 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004982}
4983
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004984static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004985{
4986 BT_DBG("%s", hdev->name);
4987
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004988 /* No ACL link over BR/EDR controller */
4989 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4990 return;
4991
4992 /* No AMP link over AMP controller */
4993 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004994 return;
4995
4996 switch (hdev->flow_ctl_mode) {
4997 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4998 hci_sched_acl_pkt(hdev);
4999 break;
5000
5001 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5002 hci_sched_acl_blk(hdev);
5003 break;
5004 }
5005}
5006
Linus Torvalds1da177e2005-04-16 15:20:36 -07005007/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005008static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009{
5010 struct hci_conn *conn;
5011 struct sk_buff *skb;
5012 int quote;
5013
5014 BT_DBG("%s", hdev->name);
5015
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005016 if (!hci_conn_num(hdev, SCO_LINK))
5017 return;
5018
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5020 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5021 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005022 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023
5024 conn->sent++;
5025 if (conn->sent == ~0)
5026 conn->sent = 0;
5027 }
5028 }
5029}
5030
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005031static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005032{
5033 struct hci_conn *conn;
5034 struct sk_buff *skb;
5035 int quote;
5036
5037 BT_DBG("%s", hdev->name);
5038
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005039 if (!hci_conn_num(hdev, ESCO_LINK))
5040 return;
5041
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005042 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5043 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005044 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5045 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005046 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005047
5048 conn->sent++;
5049 if (conn->sent == ~0)
5050 conn->sent = 0;
5051 }
5052 }
5053}
5054
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005055static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005056{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005057 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005058 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005059 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005060
5061 BT_DBG("%s", hdev->name);
5062
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005063 if (!hci_conn_num(hdev, LE_LINK))
5064 return;
5065
Marcel Holtmann4a964402014-07-02 19:10:33 +02005066 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005067 /* LE tx timeout must be longer than the maximum
5068 * link supervision timeout (40.9 seconds).
		 */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005069 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005070 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005071 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005072 }
5073
5074 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005075 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005076 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005077 u32 priority = (skb_peek(&chan->data_q))->priority;
5078 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005079 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005080 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005081
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005082 /* Stop if priority has changed */
5083 if (skb->priority < priority)
5084 break;
5085
5086 skb = skb_dequeue(&chan->data_q);
5087
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005088 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005089 hdev->le_last_tx = jiffies;
5090
5091 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005092 chan->sent++;
5093 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005094 }
5095 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005096
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005097 if (hdev->le_pkts)
5098 hdev->le_cnt = cnt;
5099 else
5100 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005101
5102 if (cnt != tmp)
5103 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005104}
5105
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005106static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005108 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109 struct sk_buff *skb;
5110
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005111 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005112 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113
Marcel Holtmann52de5992013-09-03 18:08:38 -07005114 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5115 /* Schedule queues and send stuff to HCI driver */
5116 hci_sched_acl(hdev);
5117 hci_sched_sco(hdev);
5118 hci_sched_esco(hdev);
5119 hci_sched_le(hdev);
5120 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005121
Linus Torvalds1da177e2005-04-16 15:20:36 -07005122 /* Send next queued raw (unknown type) packet */
5123 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005124 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005125}
5126
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005127/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128
5129/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005130static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005131{
5132 struct hci_acl_hdr *hdr = (void *) skb->data;
5133 struct hci_conn *conn;
5134 __u16 handle, flags;
5135
5136 skb_pull(skb, HCI_ACL_HDR_SIZE);
5137
5138 handle = __le16_to_cpu(hdr->handle);
5139 flags = hci_flags(handle);
5140 handle = hci_handle(handle);
5141
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005142 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005143 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005144
5145 hdev->stat.acl_rx++;
5146
5147 hci_dev_lock(hdev);
5148 conn = hci_conn_hash_lookup_handle(hdev, handle);
5149 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005150
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005152 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005153
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005155 l2cap_recv_acldata(conn, skb, flags);
5156 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005158 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005159 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005160 }
5161
5162 kfree_skb(skb);
5163}
5164
5165/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005166static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167{
5168 struct hci_sco_hdr *hdr = (void *) skb->data;
5169 struct hci_conn *conn;
5170 __u16 handle;
5171
5172 skb_pull(skb, HCI_SCO_HDR_SIZE);
5173
5174 handle = __le16_to_cpu(hdr->handle);
5175
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005176 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177
5178 hdev->stat.sco_rx++;
5179
5180 hci_dev_lock(hdev);
5181 conn = hci_conn_hash_lookup_handle(hdev, handle);
5182 hci_dev_unlock(hdev);
5183
5184 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005186 sco_recv_scodata(conn, skb);
5187 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005188 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005189 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005190 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191 }
5192
5193 kfree_skb(skb);
5194}
5195
Johan Hedberg9238f362013-03-05 20:37:48 +02005196static bool hci_req_is_complete(struct hci_dev *hdev)
5197{
5198 struct sk_buff *skb;
5199
5200 skb = skb_peek(&hdev->cmd_q);
5201 if (!skb)
5202 return true;
5203
5204 return bt_cb(skb)->req.start;
5205}
5206
Johan Hedberg42c6b122013-03-05 20:37:49 +02005207static void hci_resend_last(struct hci_dev *hdev)
5208{
5209 struct hci_command_hdr *sent;
5210 struct sk_buff *skb;
5211 u16 opcode;
5212
5213 if (!hdev->sent_cmd)
5214 return;
5215
5216 sent = (void *) hdev->sent_cmd->data;
5217 opcode = __le16_to_cpu(sent->opcode);
5218 if (opcode == HCI_OP_RESET)
5219 return;
5220
5221 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5222 if (!skb)
5223 return;
5224
5225 skb_queue_head(&hdev->cmd_q, skb);
5226 queue_work(hdev->workqueue, &hdev->cmd_work);
5227}
5228
Johan Hedberg9238f362013-03-05 20:37:48 +02005229void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5230{
5231 hci_req_complete_t req_complete = NULL;
5232 struct sk_buff *skb;
5233 unsigned long flags;
5234
5235 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5236
Johan Hedberg42c6b122013-03-05 20:37:49 +02005237 /* If the completed command doesn't match the last one that was
5238 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005239 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005240 if (!hci_sent_cmd_data(hdev, opcode)) {
5241 /* Some CSR based controllers generate a spontaneous
5242 * reset complete event during init and any pending
5243 * command will never be completed. In such a case we
5244 * need to resend whatever was the last sent
5245 * command.
5246 */
5247 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5248 hci_resend_last(hdev);
5249
Johan Hedberg9238f362013-03-05 20:37:48 +02005250 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005251 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005252
5253 /* If the command succeeded and there's still more commands in
5254 * this request the request is not yet complete.
5255 */
5256 if (!status && !hci_req_is_complete(hdev))
5257 return;
5258
5259 /* If this was the last command in a request the complete
5260 * callback would be found in hdev->sent_cmd instead of the
5261 * command queue (hdev->cmd_q).
5262 */
5263 if (hdev->sent_cmd) {
5264 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005265
5266 if (req_complete) {
5267 /* We must set the complete callback to NULL to
5268 * avoid calling the callback more than once if
5269 * this function gets called again.
5270 */
5271 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5272
Johan Hedberg9238f362013-03-05 20:37:48 +02005273 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005274 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005275 }
5276
5277 /* Remove all pending commands belonging to this request */
5278 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5279 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5280 if (bt_cb(skb)->req.start) {
5281 __skb_queue_head(&hdev->cmd_q, skb);
5282 break;
5283 }
5284
5285 req_complete = bt_cb(skb)->req.complete;
5286 kfree_skb(skb);
5287 }
5288 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5289
5290call_complete:
5291 if (req_complete)
5292 req_complete(hdev, status);
5293}
5294
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005295static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005296{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005297 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005298 struct sk_buff *skb;
5299
5300 BT_DBG("%s", hdev->name);
5301
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005303 /* Send copy to monitor */
5304 hci_send_to_monitor(hdev, skb);
5305
Linus Torvalds1da177e2005-04-16 15:20:36 -07005306 if (atomic_read(&hdev->promisc)) {
5307 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005308 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309 }
5310
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005311 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005312 kfree_skb(skb);
5313 continue;
5314 }
5315
5316 if (test_bit(HCI_INIT, &hdev->flags)) {
5317 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005318 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319 case HCI_ACLDATA_PKT:
5320 case HCI_SCODATA_PKT:
5321 kfree_skb(skb);
5322 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005323 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324 }
5325
5326 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005327 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005329 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330 hci_event_packet(hdev, skb);
5331 break;
5332
5333 case HCI_ACLDATA_PKT:
5334 BT_DBG("%s ACL data packet", hdev->name);
5335 hci_acldata_packet(hdev, skb);
5336 break;
5337
5338 case HCI_SCODATA_PKT:
5339 BT_DBG("%s SCO data packet", hdev->name);
5340 hci_scodata_packet(hdev, skb);
5341 break;
5342
5343 default:
5344 kfree_skb(skb);
5345 break;
5346 }
5347 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005348}
5349
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005350static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005351{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005352 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005353 struct sk_buff *skb;
5354
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005355 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5356 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005357
Linus Torvalds1da177e2005-04-16 15:20:36 -07005358 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005359 if (atomic_read(&hdev->cmd_cnt)) {
5360 skb = skb_dequeue(&hdev->cmd_q);
5361 if (!skb)
5362 return;
5363
Wei Yongjun7585b972009-02-25 18:29:52 +08005364 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005365
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005366 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005367 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005368 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005369 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005370 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005371 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005372 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005373 schedule_delayed_work(&hdev->cmd_timer,
5374 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375 } else {
5376 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005377 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005378 }
5379 }
5380}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005381
5382void hci_req_add_le_scan_disable(struct hci_request *req)
5383{
5384 struct hci_cp_le_set_scan_enable cp;
5385
5386 memset(&cp, 0, sizeof(cp));
5387 cp.enable = LE_SCAN_DISABLE;
5388 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5389}
Andre Guedesa4790db2014-02-26 20:21:47 -03005390
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005391void hci_req_add_le_passive_scan(struct hci_request *req)
5392{
5393 struct hci_cp_le_set_scan_param param_cp;
5394 struct hci_cp_le_set_scan_enable enable_cp;
5395 struct hci_dev *hdev = req->hdev;
5396 u8 own_addr_type;
5397
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005398 /* Set require_privacy to false since no SCAN_REQ is sent
5399 * during passive scanning. Not using an unresolvable address
5400 * here is important so that peer devices using direct
5401 * advertising with our address will be correctly reported
5402 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005403 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005404 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005405 return;
5406
5407 memset(&param_cp, 0, sizeof(param_cp));
5408 param_cp.type = LE_SCAN_PASSIVE;
5409 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5410 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5411 param_cp.own_address_type = own_addr_type;
5412 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5413 &param_cp);
5414
5415 memset(&enable_cp, 0, sizeof(enable_cp));
5416 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005417 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005418 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5419 &enable_cp);
5420}
5421
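/* Example (illustrative values): le_scan_interval and le_scan_window
 * are in 0.625 ms units, so an interval of 0x0060 (60 ms) with a
 * window of 0x0030 (30 ms) makes the controller listen half of the
 * time while passively scanning.
 */
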
Andre Guedesa4790db2014-02-26 20:21:47 -03005422static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5423{
5424 if (status)
5425 BT_DBG("HCI request failed to update background scanning: status 0x%2.2x", status);
5427}
5428
5429/* This function controls the background scanning based on hdev->pend_le_conns
5430 * list. If there are pending LE connections, we start the background scanning,
5431 * otherwise we stop it.
5432 *
5433 * This function requires the caller holds hdev->lock.
5434 */
5435void hci_update_background_scan(struct hci_dev *hdev)
5436{
Andre Guedesa4790db2014-02-26 20:21:47 -03005437 struct hci_request req;
5438 struct hci_conn *conn;
5439 int err;
5440
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005441 if (!test_bit(HCI_UP, &hdev->flags) ||
5442 test_bit(HCI_INIT, &hdev->flags) ||
5443 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02005444 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005445 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005446 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005447 return;
5448
Johan Hedberga70f4b52014-07-07 15:19:50 +03005449 /* No point in doing scanning if LE support hasn't been enabled */
5450 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5451 return;
5452
Johan Hedbergae23ada2014-07-07 13:24:59 +03005453 /* If discovery is active don't interfere with it */
5454 if (hdev->discovery.state != DISCOVERY_STOPPED)
5455 return;
5456
Andre Guedesa4790db2014-02-26 20:21:47 -03005457 hci_req_init(&req, hdev);
5458
Johan Hedberg2b7be332014-07-07 14:40:22 +03005459 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5460 list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005461 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005462 /* If there are no pending LE connections or devices
5463 * to be scanned for, we should stop the background
5464 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005465 */
5466
5467 /* If controller is not scanning we are done. */
5468 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5469 return;
5470
5471 hci_req_add_le_scan_disable(&req);
5472
5473 BT_DBG("%s stopping background scanning", hdev->name);
5474 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005475 /* If there is at least one pending LE connection, we should
5476 * keep the background scan running.
5477 */
5478
Andre Guedesa4790db2014-02-26 20:21:47 -03005479 /* If controller is connecting, we should not start scanning
5480 * since some controllers are not able to scan and connect at
5481 * the same time.
5482 */
5483 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5484 if (conn)
5485 return;
5486
Andre Guedes4340a122014-03-10 18:26:24 -03005487 /* If controller is currently scanning, we stop it to ensure we
5488 * don't miss any advertising (due to duplicates filter).
5489 */
5490 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5491 hci_req_add_le_scan_disable(&req);
5492
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005493 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005494
5495 BT_DBG("%s starting background scanning", hdev->name);
5496 }
5497
5498 err = hci_req_run(&req, update_background_scan_complete);
5499 if (err)
5500 BT_ERR("Failed to run HCI request: err %d", err);
5501}