blob: 078f1ecbc058281dcb81e9433f1b351f3647283f [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg970c4e42014-02-18 10:19:33 +020040#include "smp.h"
41
Marcel Holtmannb78752c2010-08-08 23:06:53 -040042static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020043static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020044static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046/* HCI device list */
47LIST_HEAD(hci_dev_list);
48DEFINE_RWLOCK(hci_dev_list_lock);
49
50/* HCI callback list */
51LIST_HEAD(hci_cb_list);
52DEFINE_RWLOCK(hci_cb_list_lock);
53
Sasha Levin3df92b32012-05-27 22:36:56 +020054/* HCI ID Numbering */
55static DEFINE_IDA(hci_index_ida);
56
Marcel Holtmann899de762014-07-11 05:51:58 +020057/* ----- HCI requests ----- */
58
59#define HCI_REQ_DONE 0
60#define HCI_REQ_PEND 1
61#define HCI_REQ_CANCELED 2
62
63#define hci_req_lock(d) mutex_lock(&d->req_lock)
64#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
65
Linus Torvalds1da177e2005-04-16 15:20:36 -070066/* ---- HCI notifications ---- */
67
/* Notify interested listeners (the HCI socket layer) of a device event. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
72
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070073/* ---- HCI debugfs entries ---- */
74
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070075static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
77{
78 struct hci_dev *hdev = file->private_data;
79 char buf[3];
80
Marcel Holtmann111902f2014-06-21 04:53:17 +020081 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070082 buf[1] = '\n';
83 buf[2] = '\0';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85}
86
87static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
89{
90 struct hci_dev *hdev = file->private_data;
91 struct sk_buff *skb;
92 char buf[32];
93 size_t buf_size = min(count, (sizeof(buf)-1));
94 bool enable;
95 int err;
96
97 if (!test_bit(HCI_UP, &hdev->flags))
98 return -ENETDOWN;
99
100 if (copy_from_user(buf, user_buf, buf_size))
101 return -EFAULT;
102
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
105 return -EINVAL;
106
Marcel Holtmann111902f2014-06-21 04:53:17 +0200107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700108 return -EALREADY;
109
110 hci_req_lock(hdev);
111 if (enable)
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113 HCI_CMD_TIMEOUT);
114 else
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116 HCI_CMD_TIMEOUT);
117 hci_req_unlock(hdev);
118
119 if (IS_ERR(skb))
120 return PTR_ERR(skb);
121
122 err = -bt_to_errno(skb->data[0]);
123 kfree_skb(skb);
124
125 if (err < 0)
126 return err;
127
Marcel Holtmann111902f2014-06-21 04:53:17 +0200128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700129
130 return count;
131}
132
133static const struct file_operations dut_mode_fops = {
134 .open = simple_open,
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
138};
139
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700140static int features_show(struct seq_file *f, void *ptr)
141{
142 struct hci_dev *hdev = f->private;
143 u8 p;
144
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
153 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700161 hci_dev_unlock(hdev);
162
163 return 0;
164}
165
166static int features_open(struct inode *inode, struct file *file)
167{
168 return single_open(file, features_show, inode->i_private);
169}
170
171static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
176};
177
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700178static int blacklist_show(struct seq_file *f, void *p)
179{
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
182
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700186 hci_dev_unlock(hdev);
187
188 return 0;
189}
190
191static int blacklist_open(struct inode *inode, struct file *file)
192{
193 return single_open(file, blacklist_show, inode->i_private);
194}
195
196static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
Johan Hedberg66593582014-07-09 12:59:14 +0300203static int whitelist_show(struct seq_file *f, void *p)
204{
205 struct hci_dev *hdev = f->private;
206 struct bdaddr_list *b;
207
208 hci_dev_lock(hdev);
209 list_for_each_entry(b, &hdev->whitelist, list)
210 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211 hci_dev_unlock(hdev);
212
213 return 0;
214}
215
216static int whitelist_open(struct inode *inode, struct file *file)
217{
218 return single_open(file, whitelist_show, inode->i_private);
219}
220
221static const struct file_operations whitelist_fops = {
222 .open = whitelist_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
226};
227
Marcel Holtmann47219832013-10-17 17:24:15 -0700228static int uuids_show(struct seq_file *f, void *p)
229{
230 struct hci_dev *hdev = f->private;
231 struct bt_uuid *uuid;
232
233 hci_dev_lock(hdev);
234 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700235 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700236
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700237 /* The Bluetooth UUID values are stored in big endian,
238 * but with reversed byte order. So convert them into
239 * the right order for the %pUb modifier.
240 */
241 for (i = 0; i < 16; i++)
242 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700243
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700244 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700245 }
246 hci_dev_unlock(hdev);
247
248 return 0;
249}
250
251static int uuids_open(struct inode *inode, struct file *file)
252{
253 return single_open(file, uuids_show, inode->i_private);
254}
255
256static const struct file_operations uuids_fops = {
257 .open = uuids_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
261};
262
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700263static int inquiry_cache_show(struct seq_file *f, void *p)
264{
265 struct hci_dev *hdev = f->private;
266 struct discovery_state *cache = &hdev->discovery;
267 struct inquiry_entry *e;
268
269 hci_dev_lock(hdev);
270
271 list_for_each_entry(e, &cache->all, all) {
272 struct inquiry_data *data = &e->data;
273 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
274 &data->bdaddr,
275 data->pscan_rep_mode, data->pscan_period_mode,
276 data->pscan_mode, data->dev_class[2],
277 data->dev_class[1], data->dev_class[0],
278 __le16_to_cpu(data->clock_offset),
279 data->rssi, data->ssp_mode, e->timestamp);
280 }
281
282 hci_dev_unlock(hdev);
283
284 return 0;
285}
286
287static int inquiry_cache_open(struct inode *inode, struct file *file)
288{
289 return single_open(file, inquiry_cache_show, inode->i_private);
290}
291
292static const struct file_operations inquiry_cache_fops = {
293 .open = inquiry_cache_open,
294 .read = seq_read,
295 .llseek = seq_lseek,
296 .release = single_release,
297};
298
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700299static int link_keys_show(struct seq_file *f, void *ptr)
300{
301 struct hci_dev *hdev = f->private;
302 struct list_head *p, *n;
303
304 hci_dev_lock(hdev);
305 list_for_each_safe(p, n, &hdev->link_keys) {
306 struct link_key *key = list_entry(p, struct link_key, list);
307 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
309 }
310 hci_dev_unlock(hdev);
311
312 return 0;
313}
314
315static int link_keys_open(struct inode *inode, struct file *file)
316{
317 return single_open(file, link_keys_show, inode->i_private);
318}
319
320static const struct file_operations link_keys_fops = {
321 .open = link_keys_open,
322 .read = seq_read,
323 .llseek = seq_lseek,
324 .release = single_release,
325};
326
/* debugfs: show the current Class of Device as a 24-bit hex value. */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	/* dev_class bytes are stored in reverse order; print MSB first. */
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
350
/* debugfs helper: report the controller's current voice setting. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read-only attribute: no setter is provided. */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
364
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700365static int auto_accept_delay_set(void *data, u64 val)
366{
367 struct hci_dev *hdev = data;
368
369 hci_dev_lock(hdev);
370 hdev->auto_accept_delay = val;
371 hci_dev_unlock(hdev);
372
373 return 0;
374}
375
376static int auto_accept_delay_get(void *data, u64 *val)
377{
378 struct hci_dev *hdev = data;
379
380 hci_dev_lock(hdev);
381 *val = hdev->auto_accept_delay;
382 hci_dev_unlock(hdev);
383
384 return 0;
385}
386
387DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388 auto_accept_delay_set, "%llu\n");
389
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800390static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391 size_t count, loff_t *ppos)
392{
393 struct hci_dev *hdev = file->private_data;
394 char buf[3];
395
Marcel Holtmann111902f2014-06-21 04:53:17 +0200396 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800397 buf[1] = '\n';
398 buf[2] = '\0';
399 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
400}
401
402static ssize_t force_sc_support_write(struct file *file,
403 const char __user *user_buf,
404 size_t count, loff_t *ppos)
405{
406 struct hci_dev *hdev = file->private_data;
407 char buf[32];
408 size_t buf_size = min(count, (sizeof(buf)-1));
409 bool enable;
410
411 if (test_bit(HCI_UP, &hdev->flags))
412 return -EBUSY;
413
414 if (copy_from_user(buf, user_buf, buf_size))
415 return -EFAULT;
416
417 buf[buf_size] = '\0';
418 if (strtobool(buf, &enable))
419 return -EINVAL;
420
Marcel Holtmann111902f2014-06-21 04:53:17 +0200421 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800422 return -EALREADY;
423
Marcel Holtmann111902f2014-06-21 04:53:17 +0200424 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800425
426 return count;
427}
428
429static const struct file_operations force_sc_support_fops = {
430 .open = simple_open,
431 .read = force_sc_support_read,
432 .write = force_sc_support_write,
433 .llseek = default_llseek,
434};
435
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800436static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437 size_t count, loff_t *ppos)
438{
439 struct hci_dev *hdev = file->private_data;
440 char buf[3];
441
442 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
443 buf[1] = '\n';
444 buf[2] = '\0';
445 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
446}
447
448static const struct file_operations sc_only_mode_fops = {
449 .open = simple_open,
450 .read = sc_only_mode_read,
451 .llseek = default_llseek,
452};
453
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700454static int idle_timeout_set(void *data, u64 val)
455{
456 struct hci_dev *hdev = data;
457
458 if (val != 0 && (val < 500 || val > 3600000))
459 return -EINVAL;
460
461 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700462 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700463 hci_dev_unlock(hdev);
464
465 return 0;
466}
467
468static int idle_timeout_get(void *data, u64 *val)
469{
470 struct hci_dev *hdev = data;
471
472 hci_dev_lock(hdev);
473 *val = hdev->idle_timeout;
474 hci_dev_unlock(hdev);
475
476 return 0;
477}
478
479DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480 idle_timeout_set, "%llu\n");
481
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200482static int rpa_timeout_set(void *data, u64 val)
483{
484 struct hci_dev *hdev = data;
485
486 /* Require the RPA timeout to be at least 30 seconds and at most
487 * 24 hours.
488 */
489 if (val < 30 || val > (60 * 60 * 24))
490 return -EINVAL;
491
492 hci_dev_lock(hdev);
493 hdev->rpa_timeout = val;
494 hci_dev_unlock(hdev);
495
496 return 0;
497}
498
499static int rpa_timeout_get(void *data, u64 *val)
500{
501 struct hci_dev *hdev = data;
502
503 hci_dev_lock(hdev);
504 *val = hdev->rpa_timeout;
505 hci_dev_unlock(hdev);
506
507 return 0;
508}
509
510DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511 rpa_timeout_set, "%llu\n");
512
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700513static int sniff_min_interval_set(void *data, u64 val)
514{
515 struct hci_dev *hdev = data;
516
517 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
518 return -EINVAL;
519
520 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700521 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700522 hci_dev_unlock(hdev);
523
524 return 0;
525}
526
527static int sniff_min_interval_get(void *data, u64 *val)
528{
529 struct hci_dev *hdev = data;
530
531 hci_dev_lock(hdev);
532 *val = hdev->sniff_min_interval;
533 hci_dev_unlock(hdev);
534
535 return 0;
536}
537
538DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539 sniff_min_interval_set, "%llu\n");
540
541static int sniff_max_interval_set(void *data, u64 val)
542{
543 struct hci_dev *hdev = data;
544
545 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
546 return -EINVAL;
547
548 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700549 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700550 hci_dev_unlock(hdev);
551
552 return 0;
553}
554
555static int sniff_max_interval_get(void *data, u64 *val)
556{
557 struct hci_dev *hdev = data;
558
559 hci_dev_lock(hdev);
560 *val = hdev->sniff_max_interval;
561 hci_dev_unlock(hdev);
562
563 return 0;
564}
565
566DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567 sniff_max_interval_set, "%llu\n");
568
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200569static int conn_info_min_age_set(void *data, u64 val)
570{
571 struct hci_dev *hdev = data;
572
573 if (val == 0 || val > hdev->conn_info_max_age)
574 return -EINVAL;
575
576 hci_dev_lock(hdev);
577 hdev->conn_info_min_age = val;
578 hci_dev_unlock(hdev);
579
580 return 0;
581}
582
583static int conn_info_min_age_get(void *data, u64 *val)
584{
585 struct hci_dev *hdev = data;
586
587 hci_dev_lock(hdev);
588 *val = hdev->conn_info_min_age;
589 hci_dev_unlock(hdev);
590
591 return 0;
592}
593
594DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595 conn_info_min_age_set, "%llu\n");
596
597static int conn_info_max_age_set(void *data, u64 val)
598{
599 struct hci_dev *hdev = data;
600
601 if (val == 0 || val < hdev->conn_info_min_age)
602 return -EINVAL;
603
604 hci_dev_lock(hdev);
605 hdev->conn_info_max_age = val;
606 hci_dev_unlock(hdev);
607
608 return 0;
609}
610
611static int conn_info_max_age_get(void *data, u64 *val)
612{
613 struct hci_dev *hdev = data;
614
615 hci_dev_lock(hdev);
616 *val = hdev->conn_info_max_age;
617 hci_dev_unlock(hdev);
618
619 return 0;
620}
621
622DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623 conn_info_max_age_set, "%llu\n");
624
Marcel Holtmannac345812014-02-23 12:44:25 -0800625static int identity_show(struct seq_file *f, void *p)
626{
627 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200628 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800629 u8 addr_type;
630
631 hci_dev_lock(hdev);
632
Johan Hedberga1f4c312014-02-27 14:05:41 +0200633 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800634
Johan Hedberga1f4c312014-02-27 14:05:41 +0200635 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800636 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800637
638 hci_dev_unlock(hdev);
639
640 return 0;
641}
642
643static int identity_open(struct inode *inode, struct file *file)
644{
645 return single_open(file, identity_show, inode->i_private);
646}
647
648static const struct file_operations identity_fops = {
649 .open = identity_open,
650 .read = seq_read,
651 .llseek = seq_lseek,
652 .release = single_release,
653};
654
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800655static int random_address_show(struct seq_file *f, void *p)
656{
657 struct hci_dev *hdev = f->private;
658
659 hci_dev_lock(hdev);
660 seq_printf(f, "%pMR\n", &hdev->random_addr);
661 hci_dev_unlock(hdev);
662
663 return 0;
664}
665
666static int random_address_open(struct inode *inode, struct file *file)
667{
668 return single_open(file, random_address_show, inode->i_private);
669}
670
671static const struct file_operations random_address_fops = {
672 .open = random_address_open,
673 .read = seq_read,
674 .llseek = seq_lseek,
675 .release = single_release,
676};
677
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700678static int static_address_show(struct seq_file *f, void *p)
679{
680 struct hci_dev *hdev = f->private;
681
682 hci_dev_lock(hdev);
683 seq_printf(f, "%pMR\n", &hdev->static_addr);
684 hci_dev_unlock(hdev);
685
686 return 0;
687}
688
689static int static_address_open(struct inode *inode, struct file *file)
690{
691 return single_open(file, static_address_show, inode->i_private);
692}
693
694static const struct file_operations static_address_fops = {
695 .open = static_address_open,
696 .read = seq_read,
697 .llseek = seq_lseek,
698 .release = single_release,
699};
700
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800701static ssize_t force_static_address_read(struct file *file,
702 char __user *user_buf,
703 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700704{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800705 struct hci_dev *hdev = file->private_data;
706 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700707
Marcel Holtmann111902f2014-06-21 04:53:17 +0200708 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800709 buf[1] = '\n';
710 buf[2] = '\0';
711 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
712}
713
714static ssize_t force_static_address_write(struct file *file,
715 const char __user *user_buf,
716 size_t count, loff_t *ppos)
717{
718 struct hci_dev *hdev = file->private_data;
719 char buf[32];
720 size_t buf_size = min(count, (sizeof(buf)-1));
721 bool enable;
722
723 if (test_bit(HCI_UP, &hdev->flags))
724 return -EBUSY;
725
726 if (copy_from_user(buf, user_buf, buf_size))
727 return -EFAULT;
728
729 buf[buf_size] = '\0';
730 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700731 return -EINVAL;
732
Marcel Holtmann111902f2014-06-21 04:53:17 +0200733 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800734 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700735
Marcel Holtmann111902f2014-06-21 04:53:17 +0200736 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800737
738 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700739}
740
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800741static const struct file_operations force_static_address_fops = {
742 .open = simple_open,
743 .read = force_static_address_read,
744 .write = force_static_address_write,
745 .llseek = default_llseek,
746};
Marcel Holtmann92202182013-10-18 16:38:10 -0700747
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800748static int white_list_show(struct seq_file *f, void *ptr)
749{
750 struct hci_dev *hdev = f->private;
751 struct bdaddr_list *b;
752
753 hci_dev_lock(hdev);
754 list_for_each_entry(b, &hdev->le_white_list, list)
755 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756 hci_dev_unlock(hdev);
757
758 return 0;
759}
760
761static int white_list_open(struct inode *inode, struct file *file)
762{
763 return single_open(file, white_list_show, inode->i_private);
764}
765
766static const struct file_operations white_list_fops = {
767 .open = white_list_open,
768 .read = seq_read,
769 .llseek = seq_lseek,
770 .release = single_release,
771};
772
Marcel Holtmann3698d702014-02-18 21:54:49 -0800773static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
774{
775 struct hci_dev *hdev = f->private;
776 struct list_head *p, *n;
777
778 hci_dev_lock(hdev);
779 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782 &irk->bdaddr, irk->addr_type,
783 16, irk->val, &irk->rpa);
784 }
785 hci_dev_unlock(hdev);
786
787 return 0;
788}
789
790static int identity_resolving_keys_open(struct inode *inode, struct file *file)
791{
792 return single_open(file, identity_resolving_keys_show,
793 inode->i_private);
794}
795
796static const struct file_operations identity_resolving_keys_fops = {
797 .open = identity_resolving_keys_open,
798 .read = seq_read,
799 .llseek = seq_lseek,
800 .release = single_release,
801};
802
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700803static int long_term_keys_show(struct seq_file *f, void *ptr)
804{
805 struct hci_dev *hdev = f->private;
806 struct list_head *p, *n;
807
808 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800809 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700810 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800811 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700812 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800814 __le64_to_cpu(ltk->rand), 16, ltk->val);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700815 }
816 hci_dev_unlock(hdev);
817
818 return 0;
819}
820
821static int long_term_keys_open(struct inode *inode, struct file *file)
822{
823 return single_open(file, long_term_keys_show, inode->i_private);
824}
825
826static const struct file_operations long_term_keys_fops = {
827 .open = long_term_keys_open,
828 .read = seq_read,
829 .llseek = seq_lseek,
830 .release = single_release,
831};
832
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700833static int conn_min_interval_set(void *data, u64 val)
834{
835 struct hci_dev *hdev = data;
836
837 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
838 return -EINVAL;
839
840 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700841 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700842 hci_dev_unlock(hdev);
843
844 return 0;
845}
846
847static int conn_min_interval_get(void *data, u64 *val)
848{
849 struct hci_dev *hdev = data;
850
851 hci_dev_lock(hdev);
852 *val = hdev->le_conn_min_interval;
853 hci_dev_unlock(hdev);
854
855 return 0;
856}
857
858DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859 conn_min_interval_set, "%llu\n");
860
861static int conn_max_interval_set(void *data, u64 val)
862{
863 struct hci_dev *hdev = data;
864
865 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
866 return -EINVAL;
867
868 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700869 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700870 hci_dev_unlock(hdev);
871
872 return 0;
873}
874
875static int conn_max_interval_get(void *data, u64 *val)
876{
877 struct hci_dev *hdev = data;
878
879 hci_dev_lock(hdev);
880 *val = hdev->le_conn_max_interval;
881 hci_dev_unlock(hdev);
882
883 return 0;
884}
885
886DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887 conn_max_interval_set, "%llu\n");
888
Marcel Holtmann816a93d2014-06-30 12:34:37 +0200889static int conn_latency_set(void *data, u64 val)
890{
891 struct hci_dev *hdev = data;
892
893 if (val > 0x01f3)
894 return -EINVAL;
895
896 hci_dev_lock(hdev);
897 hdev->le_conn_latency = val;
898 hci_dev_unlock(hdev);
899
900 return 0;
901}
902
903static int conn_latency_get(void *data, u64 *val)
904{
905 struct hci_dev *hdev = data;
906
907 hci_dev_lock(hdev);
908 *val = hdev->le_conn_latency;
909 hci_dev_unlock(hdev);
910
911 return 0;
912}
913
914DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915 conn_latency_set, "%llu\n");
916
Marcel Holtmannf1649572014-06-30 12:34:38 +0200917static int supervision_timeout_set(void *data, u64 val)
918{
919 struct hci_dev *hdev = data;
920
921 if (val < 0x000a || val > 0x0c80)
922 return -EINVAL;
923
924 hci_dev_lock(hdev);
925 hdev->le_supv_timeout = val;
926 hci_dev_unlock(hdev);
927
928 return 0;
929}
930
931static int supervision_timeout_get(void *data, u64 *val)
932{
933 struct hci_dev *hdev = data;
934
935 hci_dev_lock(hdev);
936 *val = hdev->le_supv_timeout;
937 hci_dev_unlock(hdev);
938
939 return 0;
940}
941
942DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943 supervision_timeout_set, "%llu\n");
944
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800945static int adv_channel_map_set(void *data, u64 val)
946{
947 struct hci_dev *hdev = data;
948
949 if (val < 0x01 || val > 0x07)
950 return -EINVAL;
951
952 hci_dev_lock(hdev);
953 hdev->le_adv_channel_map = val;
954 hci_dev_unlock(hdev);
955
956 return 0;
957}
958
959static int adv_channel_map_get(void *data, u64 *val)
960{
961 struct hci_dev *hdev = data;
962
963 hci_dev_lock(hdev);
964 *val = hdev->le_adv_channel_map;
965 hci_dev_unlock(hdev);
966
967 return 0;
968}
969
970DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971 adv_channel_map_set, "%llu\n");
972
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200973static int device_list_show(struct seq_file *f, void *ptr)
Andre Guedes7d474e02014-02-26 20:21:54 -0300974{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200975 struct hci_dev *hdev = f->private;
Andre Guedes7d474e02014-02-26 20:21:54 -0300976 struct hci_conn_params *p;
977
978 hci_dev_lock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300979 list_for_each_entry(p, &hdev->le_conn_params, list) {
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200980 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
Andre Guedes7d474e02014-02-26 20:21:54 -0300981 p->auto_connect);
982 }
Andre Guedes7d474e02014-02-26 20:21:54 -0300983 hci_dev_unlock(hdev);
984
985 return 0;
986}
987
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200988static int device_list_open(struct inode *inode, struct file *file)
Andre Guedes7d474e02014-02-26 20:21:54 -0300989{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200990 return single_open(file, device_list_show, inode->i_private);
Andre Guedes7d474e02014-02-26 20:21:54 -0300991}
992
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200993static const struct file_operations device_list_fops = {
994 .open = device_list_open,
Andre Guedes7d474e02014-02-26 20:21:54 -0300995 .read = seq_read,
Andre Guedes7d474e02014-02-26 20:21:54 -0300996 .llseek = seq_lseek,
997 .release = single_release,
998};
999
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000/* ---- HCI requests ---- */
1001
Johan Hedberg42c6b122013-03-05 20:37:49 +02001002static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001004 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005
1006 if (hdev->req_status == HCI_REQ_PEND) {
1007 hdev->req_result = result;
1008 hdev->req_status = HCI_REQ_DONE;
1009 wake_up_interruptible(&hdev->req_wait_q);
1010 }
1011}
1012
1013static void hci_req_cancel(struct hci_dev *hdev, int err)
1014{
1015 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1016
1017 if (hdev->req_status == HCI_REQ_PEND) {
1018 hdev->req_result = err;
1019 hdev->req_status = HCI_REQ_CANCELED;
1020 wake_up_interruptible(&hdev->req_wait_q);
1021 }
1022}
1023
/* Take ownership of the most recently received HCI event skb and
 * validate it against what the caller expects.
 *
 * If @event is non-zero, the skb is accepted only when its event code
 * matches @event.  Otherwise a Command Complete event carrying @opcode
 * is required.  On success the returned skb has been advanced past the
 * event header (and the cmd_complete header, in the opcode case).  On
 * any mismatch or malformed event the skb is freed and
 * ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Detach recv_evt under the lock so no other path can free or
	 * replace it while we inspect it; ownership passes to us.
	 */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event rather than Command Complete */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1078
/* Send a single HCI command and wait synchronously for its answer.
 *
 * @event: if non-zero, wait for this event instead of Command Complete.
 * @timeout: maximum wait, in jiffies.
 *
 * Returns the response skb (see hci_get_cmd_complete()) or an ERR_PTR:
 * -EINTR if a signal arrived, -ETIMEDOUT on timeout, or a negative
 * value derived from the controller status on failure.
 *
 * NOTE(review): hdev->req_status/req_result are a single shared slot -
 * presumably callers serialize via hdev's request lock; confirm against
 * the callers before reusing this outside that context.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Must be set before hci_req_run() so the completion callback
	 * sees a pending request.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* NOTE(review): the completion may fire between hci_req_run()
	 * and the sleep below; req_status is then already HCI_REQ_DONE,
	 * so only latency (a full timeout sleep) is added, not a lost
	 * result.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a signal this returns without clearing
	 * req_status - verify the request-lock serialization makes
	 * that safe for the next synchronous request.
	 */
	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1132
/* Send a single HCI command and wait for its Command Complete event.
 * Convenience wrapper around __hci_cmd_sync_ev() with no special
 * completion event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1139
/* Execute request and wait for completion.
 *
 * @func builds the HCI request (a sequence of commands); this helper
 * runs it and sleeps up to @timeout jiffies for hci_req_sync_complete()
 * to signal the result.  Returns 0 on success, -EINTR if interrupted,
 * -ETIMEDOUT on timeout, or a negative errno from the request result.
 *
 * NOTE(review): callers appear to hold the hdev request lock (see
 * hci_req_sync() below) so the shared req_status/req_result slot is
 * not raced - confirm for any direct callers of this function.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Pending state must be visible before the request runs */
	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): a signal returns -EINTR without resetting
	 * req_status; presumably safe under the request lock - verify.
	 */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1203
Johan Hedberg01178cd2013-03-05 20:37:41 +02001204static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001205 void (*req)(struct hci_request *req,
1206 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001207 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208{
1209 int ret;
1210
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001211 if (!test_bit(HCI_UP, &hdev->flags))
1212 return -ENETDOWN;
1213
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 /* Serialize all requests */
1215 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001216 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 hci_req_unlock(hdev);
1218
1219 return ret;
1220}
1221
Johan Hedberg42c6b122013-03-05 20:37:49 +02001222static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001224 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225
1226 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001227 set_bit(HCI_RESET, &req->hdev->flags);
1228 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229}
1230
Johan Hedberg42c6b122013-03-05 20:37:49 +02001231static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001233 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001234
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001236 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001238 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001239 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001240
1241 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001242 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243}
1244
Johan Hedberg42c6b122013-03-05 20:37:49 +02001245static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001246{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001247 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001248
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001249 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001250 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001251
Marcel Holtmannf6996cf2013-10-07 02:31:39 -07001252 /* Read Local Supported Commands */
1253 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1254
1255 /* Read Local Supported Features */
1256 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1257
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001258 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001259 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +03001260
1261 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001262 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001263
Marcel Holtmannf38ba942013-10-07 03:55:53 -07001264 /* Read Flow Control Mode */
1265 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1266
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001267 /* Read Location Data */
1268 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001269}
1270
Johan Hedberg42c6b122013-03-05 20:37:49 +02001271static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001272{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001273 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001274
1275 BT_DBG("%s %ld", hdev->name, opt);
1276
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001277 /* Reset */
1278 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001279 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001280
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001281 switch (hdev->dev_type) {
1282 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001283 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001284 break;
1285
1286 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001287 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001288 break;
1289
1290 default:
1291 BT_ERR("Unknown device type %d", hdev->dev_type);
1292 break;
1293 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001294}
1295
/* Stage-two BR/EDR setup: read the classic-side capabilities and apply
 * baseline settings (clear event filters, connection accept timeout).
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 = 32000 slots;
	 * presumably 0.625 ms baseband slots per the HCI spec - verify)
	 */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1337
/* Stage-two LE setup: read LE capabilities, clear the white list, and
 * implicitly enable LE on LE-only controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1361
1362static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1363{
1364 if (lmp_ext_inq_capable(hdev))
1365 return 0x02;
1366
1367 if (lmp_inq_rssi_capable(hdev))
1368 return 0x01;
1369
1370 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1371 hdev->lmp_subver == 0x0757)
1372 return 0x01;
1373
1374 if (hdev->manufacturer == 15) {
1375 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1376 return 0x01;
1377 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1378 return 0x01;
1379 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1380 return 0x01;
1381 }
1382
1383 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1384 hdev->lmp_subver == 0x1805)
1385 return 0x01;
1386
1387 return 0x00;
1388}
1389
Johan Hedberg42c6b122013-03-05 20:37:49 +02001390static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001391{
1392 u8 mode;
1393
Johan Hedberg42c6b122013-03-05 20:37:49 +02001394 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001395
Johan Hedberg42c6b122013-03-05 20:37:49 +02001396 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001397}
1398
/* Build and queue the HCI Set Event Mask, enabling only the events the
 * controller's feature set can generate.  BR/EDR-capable devices start
 * from the default mask; LE-only devices start from a minimal mask.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		/* Encryption events only if LE encryption is supported */
		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1475
/* Second init stage: transport-specific setup (BR/EDR and/or LE),
 * followed by feature-conditional configuration (SSP, inquiry mode,
 * extended features, authentication).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any cached EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1537
Johan Hedberg42c6b122013-03-05 20:37:49 +02001538static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001539{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001540 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001541 struct hci_cp_write_def_link_policy cp;
1542 u16 link_policy = 0;
1543
1544 if (lmp_rswitch_capable(hdev))
1545 link_policy |= HCI_LP_RSWITCH;
1546 if (lmp_hold_capable(hdev))
1547 link_policy |= HCI_LP_HOLD;
1548 if (lmp_sniff_capable(hdev))
1549 link_policy |= HCI_LP_SNIFF;
1550 if (lmp_park_capable(hdev))
1551 link_policy |= HCI_LP_PARK;
1552
1553 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001554 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001555}
1556
/* Queue Write LE Host Supported to match the current HCI_LE_ENABLED
 * flag, but only when the controller state actually differs.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command when it would change the host setting */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1577
/* Build and queue Set Event Mask Page 2 for features whose events live
 * on the second event-mask page (CSB master/slave roles, authenticated
 * payload timeout).
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1609
/* Third init stage: event mask, stored-link-key cleanup, link policy,
 * LE event mask and related LE commands, and extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		/* Base LE meta-event mask; 0x0f covers the first four
		 * LE events.
		 */
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1680
/* Fourth init stage: optional features gated on the supported-commands
 * bitmap (event mask page 2, codecs, MWS config, sync train, secure
 * connections).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured
	 * (HCI_FORCE_SC is a debugfs override)
	 */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1710
/* Run the full staged initialization for a controller and, during the
 * initial HCI_SETUP phase only, create its debugfs entries.
 *
 * Stage 1 runs for every controller type; stages 2-4 are only for
 * BR/EDR (and dual-mode) controllers. Returns 0 on success or the
 * negative error from the first failing synchronous request.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to all controller types */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
			    &whitelist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing related entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tunables */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE-only entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}
1845
/* Minimal init request used for unconfigured controllers: optionally
 * reset, then read just enough information to identify the controller.
 * The command order below is the order the commands go on the wire.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset (skipped for drivers that reset on close instead) */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address, but only when the driver is able to program
	 * a new one via the set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1863
1864static int __hci_unconf_init(struct hci_dev *hdev)
1865{
1866 int err;
1867
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001868 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1869 return 0;
1870
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001871 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1872 if (err < 0)
1873 return err;
1874
1875 return 0;
1876}
1877
Johan Hedberg42c6b122013-03-05 20:37:49 +02001878static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879{
1880 __u8 scan = opt;
1881
Johan Hedberg42c6b122013-03-05 20:37:49 +02001882 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883
1884 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001885 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886}
1887
Johan Hedberg42c6b122013-03-05 20:37:49 +02001888static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889{
1890 __u8 auth = opt;
1891
Johan Hedberg42c6b122013-03-05 20:37:49 +02001892 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893
1894 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001895 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896}
1897
Johan Hedberg42c6b122013-03-05 20:37:49 +02001898static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899{
1900 __u8 encrypt = opt;
1901
Johan Hedberg42c6b122013-03-05 20:37:49 +02001902 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001904 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001905 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906}
1907
Johan Hedberg42c6b122013-03-05 20:37:49 +02001908static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001909{
1910 __le16 policy = cpu_to_le16(opt);
1911
Johan Hedberg42c6b122013-03-05 20:37:49 +02001912 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001913
1914 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001915 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001916}
1917
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001918/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 * Device is held on return. */
1920struct hci_dev *hci_dev_get(int index)
1921{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001922 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923
1924 BT_DBG("%d", index);
1925
1926 if (index < 0)
1927 return NULL;
1928
1929 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001930 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 if (d->id == index) {
1932 hdev = hci_dev_hold(d);
1933 break;
1934 }
1935 }
1936 read_unlock(&hci_dev_list_lock);
1937 return hdev;
1938}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939
1940/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001941
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001942bool hci_discovery_active(struct hci_dev *hdev)
1943{
1944 struct discovery_state *discov = &hdev->discovery;
1945
Andre Guedes6fbe1952012-02-03 17:47:58 -03001946 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001947 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001948 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001949 return true;
1950
Andre Guedes6fbe1952012-02-03 17:47:58 -03001951 default:
1952 return false;
1953 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001954}
1955
Johan Hedbergff9ef572012-01-04 14:23:45 +02001956void hci_discovery_set_state(struct hci_dev *hdev, int state)
1957{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001958 int old_state = hdev->discovery.state;
1959
Johan Hedbergff9ef572012-01-04 14:23:45 +02001960 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1961
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001962 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001963 return;
1964
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001965 hdev->discovery.state = state;
1966
Johan Hedbergff9ef572012-01-04 14:23:45 +02001967 switch (state) {
1968 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001969 hci_update_background_scan(hdev);
1970
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001971 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001972 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001973 break;
1974 case DISCOVERY_STARTING:
1975 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001976 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001977 mgmt_discovering(hdev, 1);
1978 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001979 case DISCOVERY_RESOLVING:
1980 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001981 case DISCOVERY_STOPPING:
1982 break;
1983 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001984}
1985
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001986void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987{
Johan Hedberg30883512012-01-04 14:16:21 +02001988 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001989 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
Johan Hedberg561aafb2012-01-04 13:31:59 +02001991 list_for_each_entry_safe(p, n, &cache->all, all) {
1992 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001993 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001995
1996 INIT_LIST_HEAD(&cache->unknown);
1997 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998}
1999
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002000struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2001 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002{
Johan Hedberg30883512012-01-04 14:16:21 +02002003 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 struct inquiry_entry *e;
2005
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002006 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007
Johan Hedberg561aafb2012-01-04 13:31:59 +02002008 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002010 return e;
2011 }
2012
2013 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014}
2015
Johan Hedberg561aafb2012-01-04 13:31:59 +02002016struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002017 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002018{
Johan Hedberg30883512012-01-04 14:16:21 +02002019 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002020 struct inquiry_entry *e;
2021
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002022 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002023
2024 list_for_each_entry(e, &cache->unknown, list) {
2025 if (!bacmp(&e->data.bdaddr, bdaddr))
2026 return e;
2027 }
2028
2029 return NULL;
2030}
2031
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002032struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002033 bdaddr_t *bdaddr,
2034 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002035{
2036 struct discovery_state *cache = &hdev->discovery;
2037 struct inquiry_entry *e;
2038
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002039 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002040
2041 list_for_each_entry(e, &cache->resolve, list) {
2042 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2043 return e;
2044 if (!bacmp(&e->data.bdaddr, bdaddr))
2045 return e;
2046 }
2047
2048 return NULL;
2049}
2050
Johan Hedberga3d4e202012-01-09 00:53:02 +02002051void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002052 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002053{
2054 struct discovery_state *cache = &hdev->discovery;
2055 struct list_head *pos = &cache->resolve;
2056 struct inquiry_entry *p;
2057
2058 list_del(&ie->list);
2059
2060 list_for_each_entry(p, &cache->resolve, list) {
2061 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002062 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002063 break;
2064 pos = &p->list;
2065 }
2066
2067 list_add(&ie->list, pos);
2068}
2069
/* Insert or refresh an inquiry result in the discovery cache.
 *
 * Returns MGMT_DEV_FOUND_* flags describing the result:
 * MGMT_DEV_FOUND_LEGACY_PAIRING when either the new data or the cached
 * entry indicates no SSP support, and MGMT_DEV_FOUND_CONFIRM_NAME when
 * the remote name is still unknown (or the entry could not be
 * allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Fresh inquiry data invalidates any stored OOB data. */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change while a name lookup is still needed
		 * re-sorts the entry within the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to NAME_KNOWN (and drop it from the
	 * unknown/resolve sub-list) unless a lookup is already pending.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2131
2132static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2133{
Johan Hedberg30883512012-01-04 14:16:21 +02002134 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 struct inquiry_info *info = (struct inquiry_info *) buf;
2136 struct inquiry_entry *e;
2137 int copied = 0;
2138
Johan Hedberg561aafb2012-01-04 13:31:59 +02002139 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002141
2142 if (copied >= num)
2143 break;
2144
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 bacpy(&info->bdaddr, &data->bdaddr);
2146 info->pscan_rep_mode = data->pscan_rep_mode;
2147 info->pscan_period_mode = data->pscan_period_mode;
2148 info->pscan_mode = data->pscan_mode;
2149 memcpy(info->dev_class, data->dev_class, 3);
2150 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002151
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002153 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 }
2155
2156 BT_DBG("cache %p, copied %d", cache, copied);
2157 return copied;
2158}
2159
Johan Hedberg42c6b122013-03-05 20:37:49 +02002160static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161{
2162 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002163 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 struct hci_cp_inquiry cp;
2165
2166 BT_DBG("%s", hdev->name);
2167
2168 if (test_bit(HCI_INQUIRY, &hdev->flags))
2169 return;
2170
2171 /* Start Inquiry */
2172 memcpy(&cp.lap, &ir->lap, 3);
2173 cp.length = ir->length;
2174 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002175 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176}
2177
/* wait_on_bit() action: give up the CPU and report whether the waiting
 * task has a signal pending, allowing the bit-wait to be interrupted.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
2183
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184int hci_inquiry(void __user *arg)
2185{
2186 __u8 __user *ptr = arg;
2187 struct hci_inquiry_req ir;
2188 struct hci_dev *hdev;
2189 int err = 0, do_inquiry = 0, max_rsp;
2190 long timeo;
2191 __u8 *buf;
2192
2193 if (copy_from_user(&ir, ptr, sizeof(ir)))
2194 return -EFAULT;
2195
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002196 hdev = hci_dev_get(ir.dev_id);
2197 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 return -ENODEV;
2199
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002200 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2201 err = -EBUSY;
2202 goto done;
2203 }
2204
Marcel Holtmann4a964402014-07-02 19:10:33 +02002205 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002206 err = -EOPNOTSUPP;
2207 goto done;
2208 }
2209
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002210 if (hdev->dev_type != HCI_BREDR) {
2211 err = -EOPNOTSUPP;
2212 goto done;
2213 }
2214
Johan Hedberg56f87902013-10-02 13:43:13 +03002215 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2216 err = -EOPNOTSUPP;
2217 goto done;
2218 }
2219
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002220 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002221 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002222 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002223 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 do_inquiry = 1;
2225 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002226 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
Marcel Holtmann04837f62006-07-03 10:02:33 +02002228 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002229
2230 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002231 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2232 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002233 if (err < 0)
2234 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002235
2236 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2237 * cleared). If it is interrupted by a signal, return -EINTR.
2238 */
2239 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2240 TASK_INTERRUPTIBLE))
2241 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002242 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002244 /* for unlimited number of responses we will use buffer with
2245 * 255 entries
2246 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2248
2249 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2250 * copy it to the user space.
2251 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002252 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002253 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 err = -ENOMEM;
2255 goto done;
2256 }
2257
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002258 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002260 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261
2262 BT_DBG("num_rsp %d", ir.num_rsp);
2263
2264 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2265 ptr += sizeof(ir);
2266 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002267 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002269 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 err = -EFAULT;
2271
2272 kfree(buf);
2273
2274done:
2275 hci_dev_put(hdev);
2276 return err;
2277}
2278
/* Core power-on path for an HCI device, serialized by the request lock.
 *
 * Validates preconditions (not unregistered, not rfkilled, address
 * available), opens the transport, runs the driver setup and the
 * appropriate init sequence, and either marks the device up (notifying
 * mgmt where applicable) or tears everything back down on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* These checks are skipped while the device is still in its
	 * SETUP or CONFIG phase.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport; everything after this point must either
	 * succeed or undo back through hdev->close().
	 */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	/* Full init only for configured controllers not owned by a
	 * user channel.
	 */
	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Notify mgmt of the power-on only for fully configured
		 * BR/EDR controllers in normal (non user channel) mode.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		/* Keep only the HCI_RAW bit across the failed open. */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2422
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002423/* ---- HCI ioctl helpers ---- */
2424
/* Power on the controller with the given device index (legacy ioctl
 * and user-channel entry point).
 *
 * Takes its own reference on the hci_dev for the duration of the call.
 * Returns 0 on success or a negative errno (-ENODEV if the index does
 * not exist, -EOPNOTSUPP for unconfigured controllers opened without
 * the user channel, or the error from hci_dev_do_open()).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2479
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Unlink each connection-parameter entry from its pending
	 * action list (if any). Using list_del_init() on p->action is
	 * safe while iterating because the iteration itself runs over
	 * the separate p->list linkage, and list_del_init() is a no-op
	 * safe re-init for entries that are not currently queued.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list)
		list_del_init(&p->action);

	BT_DBG("All LE pending actions cleared");
}
2490
/* Common power-down path for a controller: cancel and flush all pending
 * work, tear down connections and caches, optionally issue an HCI Reset,
 * and call the driver's close callback.
 *
 * The teardown order is significant: TX/RX work is flushed before the
 * connection hash, the command work only after a potential reset request
 * has been queued, and the driver close happens only once all queues are
 * empty. Always returns 0 and drops the reference held while the device
 * was up.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Device already down: just stop the command timer */
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A running discoverable timeout would otherwise fire after the
	 * device is down; cancel it and drop the discoverable state.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* rpa_expired is only armed when mgmt is in use */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2593
2594int hci_dev_close(__u16 dev)
2595{
2596 struct hci_dev *hdev;
2597 int err;
2598
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002599 hdev = hci_dev_get(dev);
2600 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002602
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002603 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2604 err = -EBUSY;
2605 goto done;
2606 }
2607
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002608 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2609 cancel_delayed_work(&hdev->power_off);
2610
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002612
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002613done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 hci_dev_put(hdev);
2615 return err;
2616}
2617
/* ioctl handler that performs an HCI Reset on a running controller.
 *
 * Returns -ENODEV for an unknown index, -ENETDOWN when the device is
 * not up, -EBUSY in user channel mode, -EOPNOTSUPP for unconfigured
 * controllers, otherwise the result of the synchronous reset request.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Drop cached inquiry results and active connections before
	 * the controller state is wiped by the reset.
	 */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control accounting to its initial state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2666
2667int hci_dev_reset_stat(__u16 dev)
2668{
2669 struct hci_dev *hdev;
2670 int ret = 0;
2671
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002672 hdev = hci_dev_get(dev);
2673 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 return -ENODEV;
2675
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002676 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2677 ret = -EBUSY;
2678 goto done;
2679 }
2680
Marcel Holtmann4a964402014-07-02 19:10:33 +02002681 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002682 ret = -EOPNOTSUPP;
2683 goto done;
2684 }
2685
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2687
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002688done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 return ret;
2691}
2692
/* Synchronize the HCI_CONNECTABLE/HCI_DISCOVERABLE flags with a scan
 * mode that was changed outside of mgmt (e.g. via the HCISETSCAN ioctl)
 * and, if mgmt is in use, emit the corresponding New Settings event.
 *
 * The test_and_set/test_and_clear calls must run unconditionally so the
 * flags track the hardware state even when mgmt is not active.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot survive without inquiry
		 * scan being enabled.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	/* Without mgmt there is nobody to notify */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
2728
/* Dispatcher for the classic HCI device ioctls (HCISETAUTH,
 * HCISETENCRYPT, HCISETSCAN, ...). @arg points to a userspace
 * struct hci_dev_req carrying the device index and option value.
 *
 * All of these commands are rejected for user channel devices
 * (-EBUSY), unconfigured controllers (-EOPNOTSUPP), non-BR/EDR
 * controllers and controllers with BR/EDR disabled (-EOPNOTSUPP).
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs pkts in the low 16 bits, mtu in the high */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2830
/* ioctl helper that copies the list of registered controllers (id and
 * flags for each) to a userspace struct hci_dev_list_req.
 *
 * The requested entry count is capped at what fits in two pages.
 * Returns 0 on success, -EFAULT on a failed user copy, -EINVAL for a
 * bad count, -ENOMEM on allocation failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Report how many entries were actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2880
/* ioctl helper that fills a userspace struct hci_dev_info with the
 * identity, flags, MTUs and statistics of one controller.
 *
 * For LE-only controllers the ACL fields carry the LE buffer values
 * and the SCO fields are reported as zero. Returns 0, -ENODEV or
 * -EFAULT.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the next */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: reuse the ACL fields for the LE buffers */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2933
2934/* ---- Interface to HCI drivers ---- */
2935
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002936static int hci_rfkill_set_block(void *data, bool blocked)
2937{
2938 struct hci_dev *hdev = data;
2939
2940 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2941
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002942 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2943 return -EBUSY;
2944
Johan Hedberg5e130362013-09-13 08:58:17 +03002945 if (blocked) {
2946 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002947 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2948 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002949 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002950 } else {
2951 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002952 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002953
2954 return 0;
2955}
2956
/* rfkill operations: only the block/unblock callback is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2960
/* Work callback that powers the controller on (queued on the request
 * workqueue). On open failure the error is reported through mgmt;
 * on success the RFKILL/address/configuration conditions ignored
 * during setup are re-checked and may power the device back off.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3019
/* Work callback that powers the controller down (queued e.g. after
 * the auto power-off timeout in hci_power_on()).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
3029
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003030static void hci_discov_off(struct work_struct *work)
3031{
3032 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003033
3034 hdev = container_of(work, struct hci_dev, discov_off.work);
3035
3036 BT_DBG("%s", hdev->name);
3037
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003038 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003039}
3040
Johan Hedberg35f74982014-02-18 17:14:32 +02003041void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003042{
Johan Hedberg48210022013-01-27 00:31:28 +02003043 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003044
Johan Hedberg48210022013-01-27 00:31:28 +02003045 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3046 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003047 kfree(uuid);
3048 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003049}
3050
Johan Hedberg35f74982014-02-18 17:14:32 +02003051void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003052{
3053 struct list_head *p, *n;
3054
3055 list_for_each_safe(p, n, &hdev->link_keys) {
3056 struct link_key *key;
3057
3058 key = list_entry(p, struct link_key, list);
3059
3060 list_del(p);
3061 kfree(key);
3062 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003063}
3064
Johan Hedberg35f74982014-02-18 17:14:32 +02003065void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003066{
3067 struct smp_ltk *k, *tmp;
3068
3069 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3070 list_del(&k->list);
3071 kfree(k);
3072 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003073}
3074
Johan Hedberg970c4e42014-02-18 10:19:33 +02003075void hci_smp_irks_clear(struct hci_dev *hdev)
3076{
3077 struct smp_irk *k, *tmp;
3078
3079 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3080 list_del(&k->list);
3081 kfree(k);
3082 }
3083}
3084
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003085struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3086{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003087 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003088
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003089 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003090 if (bacmp(bdaddr, &k->bdaddr) == 0)
3091 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003092
3093 return NULL;
3094}
3095
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303096static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003097 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003098{
3099 /* Legacy key */
3100 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303101 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003102
3103 /* Debug keys are insecure so don't store them persistently */
3104 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303105 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003106
3107 /* Changed combination key and there's no previous one */
3108 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303109 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003110
3111 /* Security mode 3 case */
3112 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303113 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003114
3115 /* Neither local nor remote side had no-bonding as requirement */
3116 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303117 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003118
3119 /* Local side had dedicated bonding as requirement */
3120 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303121 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003122
3123 /* Remote side had dedicated bonding as requirement */
3124 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303125 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003126
3127 /* If none of the above criteria match, then don't store the key
3128 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303129 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003130}
3131
Johan Hedberge804d252014-07-16 11:42:28 +03003132static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003133{
Johan Hedberge804d252014-07-16 11:42:28 +03003134 if (type == SMP_LTK)
3135 return HCI_ROLE_MASTER;
3136
3137 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003138}
3139
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003140struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003141 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003142{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003143 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003144
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003145 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003146 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003147 continue;
3148
Johan Hedberge804d252014-07-16 11:42:28 +03003149 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003150 continue;
3151
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003152 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003153 }
3154
3155 return NULL;
3156}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003157
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003158struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003159 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003160{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003161 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003162
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003163 list_for_each_entry(k, &hdev->long_term_keys, list)
3164 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003165 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberge804d252014-07-16 11:42:28 +03003166 ltk_role(k->type) == role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003167 return k;
3168
3169 return NULL;
3170}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003171
Johan Hedberg970c4e42014-02-18 10:19:33 +02003172struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3173{
3174 struct smp_irk *irk;
3175
3176 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3177 if (!bacmp(&irk->rpa, rpa))
3178 return irk;
3179 }
3180
3181 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3182 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3183 bacpy(&irk->rpa, rpa);
3184 return irk;
3185 }
3186 }
3187
3188 return NULL;
3189}
3190
3191struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3192 u8 addr_type)
3193{
3194 struct smp_irk *irk;
3195
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003196 /* Identity Address must be public or static random */
3197 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3198 return NULL;
3199
Johan Hedberg970c4e42014-02-18 10:19:33 +02003200 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3201 if (addr_type == irk->addr_type &&
3202 bacmp(bdaddr, &irk->bdaddr) == 0)
3203 return irk;
3204 }
3205
3206 return NULL;
3207}
3208
/* Store (or update) a BR/EDR link key for the given remote address.
 *
 * @hdev:       controller the key belongs to
 * @conn:       connection the key was generated on, may be NULL
 * @bdaddr:     remote device address
 * @val:        HCI_LINK_KEY_SIZE bytes of key material
 * @type:       link key type (HCI_LK_*)
 * @pin_len:    PIN length used during pairing
 * @persistent: if non-NULL, set to whether the key should be stored
 *              persistently (as decided by hci_persistent_key())
 *
 * Returns the stored entry, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	/* Reuse an existing entry for this address if there is one;
	 * otherwise allocate a fresh entry and link it into the list.
	 */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" below. */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3255
Johan Hedbergca9142b2014-02-19 14:57:44 +02003256struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003257 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003258 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003259{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003260 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003261 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003262
Johan Hedberge804d252014-07-16 11:42:28 +03003263 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003264 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003265 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003266 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003267 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003268 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003269 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003270 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003271 }
3272
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003273 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003274 key->bdaddr_type = addr_type;
3275 memcpy(key->val, tk, sizeof(key->val));
3276 key->authenticated = authenticated;
3277 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003278 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003279 key->enc_size = enc_size;
3280 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003281
Johan Hedbergca9142b2014-02-19 14:57:44 +02003282 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003283}
3284
Johan Hedbergca9142b2014-02-19 14:57:44 +02003285struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3286 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003287{
3288 struct smp_irk *irk;
3289
3290 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3291 if (!irk) {
3292 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3293 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003294 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003295
3296 bacpy(&irk->bdaddr, bdaddr);
3297 irk->addr_type = addr_type;
3298
3299 list_add(&irk->list, &hdev->identity_resolving_keys);
3300 }
3301
3302 memcpy(irk->val, val, 16);
3303 bacpy(&irk->rpa, rpa);
3304
Johan Hedbergca9142b2014-02-19 14:57:44 +02003305 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003306}
3307
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003308int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3309{
3310 struct link_key *key;
3311
3312 key = hci_find_link_key(hdev, bdaddr);
3313 if (!key)
3314 return -ENOENT;
3315
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003316 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003317
3318 list_del(&key->list);
3319 kfree(key);
3320
3321 return 0;
3322}
3323
Johan Hedberge0b2b272014-02-18 17:14:31 +02003324int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003325{
3326 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003327 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003328
3329 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003330 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003331 continue;
3332
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003333 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003334
3335 list_del(&k->list);
3336 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003337 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003338 }
3339
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003340 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003341}
3342
Johan Hedberga7ec7332014-02-18 17:14:35 +02003343void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3344{
3345 struct smp_irk *k, *tmp;
3346
Johan Hedberg668b7b12014-02-21 16:03:31 +02003347 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003348 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3349 continue;
3350
3351 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3352
3353 list_del(&k->list);
3354 kfree(k);
3355 }
3356}
3357
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	/* Fired when a sent HCI command has not completed within the
	 * timeout period.  Logs the stuck opcode (if a command is still
	 * outstanding) and restarts command processing.
	 */
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Restore the command credit and kick the command work so the
	 * queue can make progress again.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3376
Szymon Janc2763eda2011-03-22 13:12:22 +01003377struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003378 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003379{
3380 struct oob_data *data;
3381
3382 list_for_each_entry(data, &hdev->remote_oob_data, list)
3383 if (bacmp(bdaddr, &data->bdaddr) == 0)
3384 return data;
3385
3386 return NULL;
3387}
3388
3389int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3390{
3391 struct oob_data *data;
3392
3393 data = hci_find_remote_oob_data(hdev, bdaddr);
3394 if (!data)
3395 return -ENOENT;
3396
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003397 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003398
3399 list_del(&data->list);
3400 kfree(data);
3401
3402 return 0;
3403}
3404
Johan Hedberg35f74982014-02-18 17:14:32 +02003405void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003406{
3407 struct oob_data *data, *n;
3408
3409 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3410 list_del(&data->list);
3411 kfree(data);
3412 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003413}
3414
Marcel Holtmann07988722014-01-10 02:07:29 -08003415int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3416 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003417{
3418 struct oob_data *data;
3419
3420 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003421 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003422 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003423 if (!data)
3424 return -ENOMEM;
3425
3426 bacpy(&data->bdaddr, bdaddr);
3427 list_add(&data->list, &hdev->remote_oob_data);
3428 }
3429
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003430 memcpy(data->hash192, hash, sizeof(data->hash192));
3431 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003432
Marcel Holtmann07988722014-01-10 02:07:29 -08003433 memset(data->hash256, 0, sizeof(data->hash256));
3434 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3435
3436 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3437
3438 return 0;
3439}
3440
3441int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3442 u8 *hash192, u8 *randomizer192,
3443 u8 *hash256, u8 *randomizer256)
3444{
3445 struct oob_data *data;
3446
3447 data = hci_find_remote_oob_data(hdev, bdaddr);
3448 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003449 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003450 if (!data)
3451 return -ENOMEM;
3452
3453 bacpy(&data->bdaddr, bdaddr);
3454 list_add(&data->list, &hdev->remote_oob_data);
3455 }
3456
3457 memcpy(data->hash192, hash192, sizeof(data->hash192));
3458 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3459
3460 memcpy(data->hash256, hash256, sizeof(data->hash256));
3461 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3462
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003463 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003464
3465 return 0;
3466}
3467
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003468struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003469 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003470{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003471 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003472
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003473 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003474 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003475 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003476 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003477
3478 return NULL;
3479}
3480
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003481void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003482{
3483 struct list_head *p, *n;
3484
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003485 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003486 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003487
3488 list_del(p);
3489 kfree(b);
3490 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003491}
3492
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003493int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003494{
3495 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003496
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003497 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003498 return -EBADF;
3499
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003500 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003501 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003502
Johan Hedberg27f70f32014-07-21 10:50:06 +03003503 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003504 if (!entry)
3505 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003506
3507 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003508 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003509
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003510 list_add(&entry->list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003511
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003512 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003513}
3514
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003515int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003516{
3517 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003518
Johan Hedberg35f74982014-02-18 17:14:32 +02003519 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003520 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003521 return 0;
3522 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003523
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003524 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003525 if (!entry)
3526 return -ENOENT;
3527
3528 list_del(&entry->list);
3529 kfree(entry);
3530
3531 return 0;
3532}
3533
Andre Guedes15819a72014-02-03 13:56:18 -03003534/* This function requires the caller holds hdev->lock */
3535struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3536 bdaddr_t *addr, u8 addr_type)
3537{
3538 struct hci_conn_params *params;
3539
Johan Hedberg738f6182014-07-03 19:33:51 +03003540 /* The conn params list only contains identity addresses */
3541 if (!hci_is_identity_address(addr, addr_type))
3542 return NULL;
3543
Andre Guedes15819a72014-02-03 13:56:18 -03003544 list_for_each_entry(params, &hdev->le_conn_params, list) {
3545 if (bacmp(&params->addr, addr) == 0 &&
3546 params->addr_type == addr_type) {
3547 return params;
3548 }
3549 }
3550
3551 return NULL;
3552}
3553
Andre Guedescef952c2014-02-26 20:21:49 -03003554static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3555{
3556 struct hci_conn *conn;
3557
3558 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3559 if (!conn)
3560 return false;
3561
3562 if (conn->dst_type != type)
3563 return false;
3564
3565 if (conn->state != BT_CONNECTED)
3566 return false;
3567
3568 return true;
3569}
3570
Andre Guedes15819a72014-02-03 13:56:18 -03003571/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003572struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3573 bdaddr_t *addr, u8 addr_type)
Marcel Holtmann4b109662014-06-29 13:41:49 +02003574{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003575 struct hci_conn_params *param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003576
Johan Hedberg738f6182014-07-03 19:33:51 +03003577 /* The list only contains identity addresses */
3578 if (!hci_is_identity_address(addr, addr_type))
3579 return NULL;
3580
Johan Hedberg501f8822014-07-04 12:37:26 +03003581 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003582 if (bacmp(&param->addr, addr) == 0 &&
3583 param->addr_type == addr_type)
3584 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003585 }
3586
3587 return NULL;
3588}
3589
3590/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003591struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3592 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003593{
3594 struct hci_conn_params *params;
3595
Johan Hedbergc46245b2014-07-02 17:37:33 +03003596 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003597 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003598
3599 params = hci_conn_params_lookup(hdev, addr, addr_type);
3600 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003601 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003602
3603 params = kzalloc(sizeof(*params), GFP_KERNEL);
3604 if (!params) {
3605 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003606 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003607 }
3608
3609 bacpy(&params->addr, addr);
3610 params->addr_type = addr_type;
3611
3612 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003613 INIT_LIST_HEAD(&params->action);
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003614
3615 params->conn_min_interval = hdev->le_conn_min_interval;
3616 params->conn_max_interval = hdev->le_conn_max_interval;
3617 params->conn_latency = hdev->le_conn_latency;
3618 params->supervision_timeout = hdev->le_supv_timeout;
3619 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3620
3621 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3622
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003623 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003624}
3625
/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Look up the existing parameter entry or create a new one. */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged. */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list (pend_le_conns or
	 * pend_le_reports) the entry may currently be on before
	 * re-adding it below according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if there isn't
		 * already an established one to this device.
		 */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
3665
3666/* This function requires the caller holds hdev->lock */
3667void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3668{
3669 struct hci_conn_params *params;
3670
3671 params = hci_conn_params_lookup(hdev, addr, addr_type);
3672 if (!params)
3673 return;
3674
Johan Hedberg95305ba2014-07-04 12:37:21 +03003675 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003676 list_del(&params->list);
3677 kfree(params);
3678
Johan Hedberg95305ba2014-07-04 12:37:21 +03003679 hci_update_background_scan(hdev);
3680
Andre Guedes15819a72014-02-03 13:56:18 -03003681 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3682}
3683
3684/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003685void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3686{
3687 struct hci_conn_params *params, *tmp;
3688
3689 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3690 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3691 continue;
3692 list_del(&params->list);
3693 kfree(params);
3694 }
3695
3696 BT_DBG("All LE disabled connection parameters were removed");
3697}
3698
3699/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003700void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003701{
3702 struct hci_conn_params *params, *tmp;
3703
3704 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberga2f41a82014-07-04 12:37:19 +03003705 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003706 list_del(&params->list);
3707 kfree(params);
3708 }
3709
Johan Hedberga2f41a82014-07-04 12:37:19 +03003710 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003711
Andre Guedes15819a72014-02-03 13:56:18 -03003712 BT_DBG("All LE connection parameters were removed");
3713}
3714
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003715static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003716{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003717 if (status) {
3718 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003719
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003720 hci_dev_lock(hdev);
3721 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3722 hci_dev_unlock(hdev);
3723 return;
3724 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003725}
3726
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* Completion handler for the LE scan disable request issued by
	 * le_scan_disable_work().  For LE-only discovery this marks the
	 * procedure as stopped; for interleaved discovery it starts the
	 * BR/EDR inquiry phase.
	 */
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Build an inquiry request for the BR/EDR phase. */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Flush cached results so this session only reports
		 * fresh inquiry responses.
		 */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3769
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003770static void le_scan_disable_work(struct work_struct *work)
3771{
3772 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003773 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003774 struct hci_request req;
3775 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003776
3777 BT_DBG("%s", hdev->name);
3778
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003779 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003780
Andre Guedesb1efcc22014-02-26 20:21:40 -03003781 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003782
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003783 err = hci_req_run(&req, le_scan_disable_work_complete);
3784 if (err)
3785 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003786}
3787
/* Queue an HCI command on @req to program @rpa as the controller's
 * random address, unless it is unsafe to change it right now.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
3810
/* Decide which own address type to use for an LE operation and, when a
 * random address is needed, queue the command to program it on @req.
 *
 * @req:             request the address update is appended to
 * @require_privacy: if true and privacy is off, fall back to an
 *                   unresolvable private address
 * @own_addr_type:   out-parameter, set to ADDR_LE_DEV_RANDOM or
 *                   ADDR_LE_DEV_PUBLIC
 *
 * Returns 0 on success or a negative error from RPA generation.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Note: test_and_clear_bit() both checks and clears the
		 * expiry flag; the current RPA is kept only if it had
		 * not expired AND is already programmed.
		 */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Schedule the next RPA rotation. */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3880
Johan Hedberga1f4c312014-02-27 14:05:41 +02003881/* Copy the Identity Address of the controller.
3882 *
3883 * If the controller has a public BD_ADDR, then by default use that one.
3884 * If this is a LE only controller without a public address, default to
3885 * the static random address.
3886 *
3887 * For debugging purposes it is possible to force controllers with a
3888 * public address to use the static random address instead.
3889 */
3890void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3891 u8 *bdaddr_type)
3892{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003893 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003894 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3895 bacpy(bdaddr, &hdev->static_addr);
3896 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3897 } else {
3898 bacpy(bdaddr, &hdev->bdaddr);
3899 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3900 }
3901}
3902
/* Alloc HCI device.
 *
 * Allocates and initializes a zeroed struct hci_dev with default
 * parameters, empty state lists and all work items wired up. Returns
 * NULL on allocation failure. The caller registers it with
 * hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default BR/EDR packet types, link policy and IO capability */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scan and connection parameter defaults */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Empty key, device and pending-action lists */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Deferred work for RX/TX/command processing and power control */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands that never get a completion event */
	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3977
3978/* Free HCI device */
3979void hci_free_dev(struct hci_dev *hdev)
3980{
David Herrmann9be0dab2012-04-22 14:39:57 +02003981 /* will free via device release */
3982 put_device(&hdev->dev);
3983}
3984EXPORT_SYMBOL(hci_free_dev);
3985
/* Register HCI device.
 *
 * Validates the driver callbacks, allocates an index, creates the
 * work queues, crypto context, sysfs device and rfkill switch, links
 * the device into the global list and schedules the power-on work.
 * Returns the allocated index on success or a negative errno; on
 * failure all resources acquired so far are unwound via the goto
 * labels at the bottom.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* The driver must provide at least these three callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		/* err_wqueue would destroy both queues; only the first
		 * exists here, so tear it down by hand.
		 */
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	/* AES context used for LE address resolution */
	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	/* rfkill registration is best-effort; the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
4100
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): unlinks the device, closes it, tears
 * down rfkill/crypto/sysfs/debugfs/workqueues, purges all stored keys
 * and device lists, drops the registration reference and finally
 * releases the index. Teardown order mirrors registration order.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Block further activity on this device while tearing down */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all stored keys, addresses and connection parameters */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Use the saved id; hdev may no longer be valid here */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4169
4170/* Suspend HCI device */
4171int hci_suspend_dev(struct hci_dev *hdev)
4172{
4173 hci_notify(hdev, HCI_DEV_SUSPEND);
4174 return 0;
4175}
4176EXPORT_SYMBOL(hci_suspend_dev);
4177
4178/* Resume HCI device */
4179int hci_resume_dev(struct hci_dev *hdev)
4180{
4181 hci_notify(hdev, HCI_DEV_RESUME);
4182 return 0;
4183}
4184EXPORT_SYMBOL(hci_resume_dev);
4185
Marcel Holtmann76bca882009-11-18 00:40:39 +01004186/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004187int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004188{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004189 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004190 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004191 kfree_skb(skb);
4192 return -ENXIO;
4193 }
4194
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004195 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004196 bt_cb(skb)->incoming = 1;
4197
4198 /* Time stamp */
4199 __net_timestamp(skb);
4200
Marcel Holtmann76bca882009-11-18 00:40:39 +01004201 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004202 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004203
Marcel Holtmann76bca882009-11-18 00:40:39 +01004204 return 0;
4205}
4206EXPORT_SYMBOL(hci_recv_frame);
4207
/* Reassemble a (possibly partial) HCI packet from a byte stream.
 *
 * Accumulates up to @count bytes of @data into hdev->reassembly[index].
 * A fresh skb is allocated when a new packet starts; the per-skb
 * control block tracks how many bytes are still expected. Once the
 * header is complete the expected payload length is read from it, and
 * a fully assembled packet is handed to hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno (-EILSEQ for a bad type/index, -ENOMEM on allocation failure
 * or when the announced payload exceeds the preallocated tailroom).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the worst
		 * case of this packet type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most what is still expected for this packet */
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once exactly the header has arrived, learn the payload
		 * length from it and sanity-check it against tailroom.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4315
Marcel Holtmannef222012007-07-11 06:42:04 +02004316int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4317{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304318 int rem = 0;
4319
Marcel Holtmannef222012007-07-11 06:42:04 +02004320 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4321 return -EILSEQ;
4322
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004323 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004324 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304325 if (rem < 0)
4326 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004327
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304328 data += (count - rem);
4329 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004330 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004331
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304332 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004333}
4334EXPORT_SYMBOL(hci_recv_fragment);
4335
/* Single reassembly slot for type-prefixed stream input (e.g. UART) */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream into the reassembler.
 *
 * Unlike hci_recv_fragment(), the packet type is not known up front:
 * when no packet is in progress, the first byte of the stream is the
 * packet type indicator; otherwise the type of the in-progress packet
 * is reused. Returns the last remainder (>= 0) or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* Consume the type indicator byte */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4370
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371/* ---- Interface to upper protocols ---- */
4372
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373int hci_register_cb(struct hci_cb *cb)
4374{
4375 BT_DBG("%p name %s", cb, cb->name);
4376
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004377 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004379 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380
4381 return 0;
4382}
4383EXPORT_SYMBOL(hci_register_cb);
4384
4385int hci_unregister_cb(struct hci_cb *cb)
4386{
4387 BT_DBG("%p name %s", cb, cb->name);
4388
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004389 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004390 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004391 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004392
4393 return 0;
4394}
4395EXPORT_SYMBOL(hci_unregister_cb);
4396
/* Hand an outgoing frame to the driver.
 *
 * Time-stamps the skb, mirrors it to the monitor socket (and, in
 * promiscuous mode, to raw sockets), detaches it from its socket
 * owner and passes it to the driver's send callback. If the driver
 * reports an error the skb is freed here.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
4423
Johan Hedberg3119ae92013-03-05 20:37:44 +02004424void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4425{
4426 skb_queue_head_init(&req->cmd_q);
4427 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004428 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004429}
4430
/* Submit a built request to the command queue.
 *
 * Marks the final queued command with @complete so the event handler
 * can signal completion, splices the request's commands onto the
 * device command queue and kicks the command worker. Returns 0 on
 * success, the builder's error if one occurred, or -ENODATA for an
 * empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* cmd_q is also touched from interrupt context */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4462
Marcel Holtmann899de762014-07-11 05:51:58 +02004463bool hci_req_pending(struct hci_dev *hdev)
4464{
4465 return (hdev->req_status == HCI_REQ_PEND);
4466}
4467
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004468static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004469 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470{
4471 int len = HCI_COMMAND_HDR_SIZE + plen;
4472 struct hci_command_hdr *hdr;
4473 struct sk_buff *skb;
4474
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004476 if (!skb)
4477 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478
4479 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004480 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481 hdr->plen = plen;
4482
4483 if (plen)
4484 memcpy(skb_put(skb, plen), param, plen);
4485
4486 BT_DBG("skb len %d", skb->len);
4487
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004488 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004489
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004490 return skb;
4491}
4492
4493/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004494int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4495 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004496{
4497 struct sk_buff *skb;
4498
4499 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4500
4501 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4502 if (!skb) {
4503 BT_ERR("%s no memory for command", hdev->name);
4504 return -ENOMEM;
4505 }
4506
Johan Hedberg11714b32013-03-05 20:37:47 +02004507 /* Stand-alone HCI commands must be flaged as
4508 * single-command requests.
4509 */
4510 bt_cb(skb)->req.start = true;
4511
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004513 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514
4515 return 0;
4516}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517
/* Queue a command to an asynchronous HCI request.
 *
 * Like hci_req_add() but additionally records @event as the event
 * expected to complete this command. On allocation failure the error
 * is latched in req->err and reported later by hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command starts the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
4548
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004549void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4550 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004551{
4552 hci_req_add_ev(req, opcode, plen, param, 0);
4553}
4554
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004556void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004557{
4558 struct hci_command_hdr *hdr;
4559
4560 if (!hdev->sent_cmd)
4561 return NULL;
4562
4563 hdr = (void *) hdev->sent_cmd->data;
4564
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004565 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004566 return NULL;
4567
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004568 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569
4570 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4571}
4572
4573/* Send ACL data */
4574static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4575{
4576 struct hci_acl_hdr *hdr;
4577 int len = skb->len;
4578
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004579 skb_push(skb, HCI_ACL_HDR_SIZE);
4580 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004581 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004582 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4583 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584}
4585
/* Queue an outgoing ACL frame, splitting a fragmented skb into its
 * frag_list members. The first fragment keeps the caller's flags;
 * continuation fragments are re-flagged ACL_CONT. All fragments of
 * one frame are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are
	 * handled individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		/* AMP links use the channel handle, not the connection one */
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Remaining fragments are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
4643
4644void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4645{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004646 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004647
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004648 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004649
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004650 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004652 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header: connection handle plus payload length.
	 * NOTE(review): hdr.dlen is narrower than skb->len, so the caller
	 * presumably fragments to the SCO MTU first -- confirm at call sites.
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Prepend the header to the skb and tag the HCI packet type */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue on the connection and let the TX work item send it */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675
4676/* ---- HCI TX task (outgoing data) ---- */
4677
4678/* HCI Connection scheduler */
/* Pick the connection of @type to service next: among connections with
 * queued data in a usable state, choose the one with the lowest in-flight
 * counter (c->sent). On success *quote is set to the number of packets
 * the caller may send from it; it is 0 when no connection was found.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip connections of other types or with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Controller buffer budget for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Without a dedicated LE buffer pool (le_mtu == 0)
			 * the ACL buffers are shared.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the budget, but always at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4738
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004739static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740{
4741 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004742 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743
Ville Tervobae1f5d92011-02-10 22:38:53 -03004744 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004746 rcu_read_lock();
4747
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004749 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004750 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004751 BT_ERR("%s killing stalled connection %pMR",
4752 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004753 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754 }
4755 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004756
4757 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004758}
4759
/* Channel scheduler: pick the channel of link type @type to service next.
 * Only channels whose head skb carries the highest pending priority are
 * considered; among those, the channel on the connection with the lowest
 * in-flight counter wins. *quote receives the number of packets the
 * caller may send from the returned channel.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority counts */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the selection
			 * restricted to this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-sent connection wins at equal priority */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Controller buffer budget for the winning channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE shares ACL buffers when there is no LE pool */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share across the contenders, but always at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4841
/* Anti-starvation pass, run after a scheduling round changed the buffer
 * count: for every channel of @type that was NOT serviced this round
 * (chan->sent == 0) but has data pending, promote its head skb to the
 * highest schedulable priority. Channels that were serviced merely get
 * their per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Serviced this round: just reset the counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4891
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004892static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4893{
4894 /* Calculate count of blocks used by this packet */
4895 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4896}
4897
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004898static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004899{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004900 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004901 /* ACL tx timeout must be longer than maximum
4902 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004903 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004904 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004905 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004907}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004908
/* Packet-based ACL flow control: transmit queued ACL data while the
 * controller still has free packet buffers (acl_cnt).
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	/* Snapshot of the credit count, to detect below whether this
	 * round actually sent anything.
	 */
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed; bump per-channel
			 * and per-connection in-flight counters.
			 */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything went out, give starved channels a priority boost */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4946
/* Block-based ACL flow control: like hci_sched_acl_pkt(), but buffer
 * accounting is done in controller data blocks rather than whole
 * packets, and AMP controllers schedule AMP links instead of ACL.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	/* Snapshot of the block count, to detect whether this round sent
	 * anything.
	 */
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP links; BR/EDR carry ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* A packet must fit entirely in the remaining
			 * block budget; otherwise stop scheduling.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Account in blocks, both for the controller
			 * budget and the per-channel/connection counters.
			 */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* If anything went out, give starved channels a priority boost */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
5000
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005001static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005002{
5003 BT_DBG("%s", hdev->name);
5004
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005005 /* No ACL link over BR/EDR controller */
5006 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5007 return;
5008
5009 /* No AMP link over AMP controller */
5010 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005011 return;
5012
5013 switch (hdev->flow_ctl_mode) {
5014 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5015 hci_sched_acl_pkt(hdev);
5016 break;
5017
5018 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5019 hci_sched_acl_blk(hdev);
5020 break;
5021 }
5022}
5023
Linus Torvalds1da177e2005-04-16 15:20:36 -07005024/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005025static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026{
5027 struct hci_conn *conn;
5028 struct sk_buff *skb;
5029 int quote;
5030
5031 BT_DBG("%s", hdev->name);
5032
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005033 if (!hci_conn_num(hdev, SCO_LINK))
5034 return;
5035
Linus Torvalds1da177e2005-04-16 15:20:36 -07005036 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5037 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5038 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005039 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040
5041 conn->sent++;
5042 if (conn->sent == ~0)
5043 conn->sent = 0;
5044 }
5045 }
5046}
5047
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005048static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005049{
5050 struct hci_conn *conn;
5051 struct sk_buff *skb;
5052 int quote;
5053
5054 BT_DBG("%s", hdev->name);
5055
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005056 if (!hci_conn_num(hdev, ESCO_LINK))
5057 return;
5058
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005059 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5060 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005061 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5062 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005063 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005064
5065 conn->sent++;
5066 if (conn->sent == ~0)
5067 conn->sent = 0;
5068 }
5069 }
5070}
5071
/* LE scheduler: transmit queued LE data. Controllers without a
 * dedicated LE buffer pool (le_pkts == 0) share the ACL buffers, so the
 * consumed credits are written back to the matching counter at the end.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE pool if present, else the shared ACL pool; keep a
	 * snapshot (tmp) to detect whether anything was sent.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything went out, give starved channels a priority boost */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
5122
/* TX work item: run each link-type scheduler, then flush raw packets. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* When a user channel owns the device, the kernel schedulers
	 * stay out of the way; only the raw queue is flushed below.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
5143
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005144/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145
5146/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005147static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148{
5149 struct hci_acl_hdr *hdr = (void *) skb->data;
5150 struct hci_conn *conn;
5151 __u16 handle, flags;
5152
5153 skb_pull(skb, HCI_ACL_HDR_SIZE);
5154
5155 handle = __le16_to_cpu(hdr->handle);
5156 flags = hci_flags(handle);
5157 handle = hci_handle(handle);
5158
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005159 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005160 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161
5162 hdev->stat.acl_rx++;
5163
5164 hci_dev_lock(hdev);
5165 conn = hci_conn_hash_lookup_handle(hdev, handle);
5166 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005167
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005169 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005170
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005172 l2cap_recv_acldata(conn, skb, flags);
5173 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005175 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005176 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177 }
5178
5179 kfree_skb(skb);
5180}
5181
5182/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005183static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184{
5185 struct hci_sco_hdr *hdr = (void *) skb->data;
5186 struct hci_conn *conn;
5187 __u16 handle;
5188
5189 skb_pull(skb, HCI_SCO_HDR_SIZE);
5190
5191 handle = __le16_to_cpu(hdr->handle);
5192
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005193 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194
5195 hdev->stat.sco_rx++;
5196
5197 hci_dev_lock(hdev);
5198 conn = hci_conn_hash_lookup_handle(hdev, handle);
5199 hci_dev_unlock(hdev);
5200
5201 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005203 sco_recv_scodata(conn, skb);
5204 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005205 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005206 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005207 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208 }
5209
5210 kfree_skb(skb);
5211}
5212
Johan Hedberg9238f362013-03-05 20:37:48 +02005213static bool hci_req_is_complete(struct hci_dev *hdev)
5214{
5215 struct sk_buff *skb;
5216
5217 skb = skb_peek(&hdev->cmd_q);
5218 if (!skb)
5219 return true;
5220
5221 return bt_cb(skb)->req.start;
5222}
5223
Johan Hedberg42c6b122013-03-05 20:37:49 +02005224static void hci_resend_last(struct hci_dev *hdev)
5225{
5226 struct hci_command_hdr *sent;
5227 struct sk_buff *skb;
5228 u16 opcode;
5229
5230 if (!hdev->sent_cmd)
5231 return;
5232
5233 sent = (void *) hdev->sent_cmd->data;
5234 opcode = __le16_to_cpu(sent->opcode);
5235 if (opcode == HCI_OP_RESET)
5236 return;
5237
5238 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5239 if (!skb)
5240 return;
5241
5242 skb_queue_head(&hdev->cmd_q, skb);
5243 queue_work(hdev->workqueue, &hdev->cmd_work);
5244}
5245
/* Called when a command completes (or reports status) to decide whether
 * the whole request it belongs to has finished and, if so, to run the
 * request's completion callback and discard its remaining queued
 * commands.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A start marker means the next request begins here, so
		 * put that command back and stop flushing.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* The callback of the last flushed command wins */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5311
/* RX work item: drain hdev->rx_q and dispatch each received packet. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* With an exclusive user channel the kernel stack does
		 * not process packets itself; drop after the copies above.
		 */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: nothing can handle it */
			kfree_skb(skb);
			break;
		}
	}
}
5366
/* CMD work item: transmit the next queued HCI command if the controller
 * has a free command credit (cmd_cnt), keeping a clone in sent_cmd so
 * the eventual completion event can be matched and the command resent.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously tracked command (kfree_skb(NULL)
		 * is a no-op).
		 */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005398
5399void hci_req_add_le_scan_disable(struct hci_request *req)
5400{
5401 struct hci_cp_le_set_scan_enable cp;
5402
5403 memset(&cp, 0, sizeof(cp));
5404 cp.enable = LE_SCAN_DISABLE;
5405 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5406}
Andre Guedesa4790db2014-02-26 20:21:47 -03005407
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005408void hci_req_add_le_passive_scan(struct hci_request *req)
5409{
5410 struct hci_cp_le_set_scan_param param_cp;
5411 struct hci_cp_le_set_scan_enable enable_cp;
5412 struct hci_dev *hdev = req->hdev;
5413 u8 own_addr_type;
5414
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005415 /* Set require_privacy to false since no SCAN_REQ are send
5416 * during passive scanning. Not using an unresolvable address
5417 * here is important so that peer devices using direct
5418 * advertising with our address will be correctly reported
5419 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005420 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005421 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005422 return;
5423
5424 memset(&param_cp, 0, sizeof(param_cp));
5425 param_cp.type = LE_SCAN_PASSIVE;
5426 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5427 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5428 param_cp.own_address_type = own_addr_type;
5429 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5430 &param_cp);
5431
5432 memset(&enable_cp, 0, sizeof(enable_cp));
5433 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005434 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005435 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5436 &enable_cp);
5437}
5438
Andre Guedesa4790db2014-02-26 20:21:47 -03005439static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5440{
5441 if (status)
5442 BT_DBG("HCI request failed to update background scanning: "
5443 "status 0x%2.2x", status);
5444}
5445
5446/* This function controls the background scanning based on hdev->pend_le_conns
5447 * list. If there are pending LE connection we start the background scanning,
5448 * otherwise we stop it.
5449 *
5450 * This function requires the caller holds hdev->lock.
5451 */
5452void hci_update_background_scan(struct hci_dev *hdev)
5453{
Andre Guedesa4790db2014-02-26 20:21:47 -03005454 struct hci_request req;
5455 struct hci_conn *conn;
5456 int err;
5457
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005458 if (!test_bit(HCI_UP, &hdev->flags) ||
5459 test_bit(HCI_INIT, &hdev->flags) ||
5460 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02005461 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005462 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005463 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005464 return;
5465
Johan Hedberga70f4b52014-07-07 15:19:50 +03005466 /* No point in doing scanning if LE support hasn't been enabled */
5467 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5468 return;
5469
Johan Hedbergae23ada2014-07-07 13:24:59 +03005470 /* If discovery is active don't interfere with it */
5471 if (hdev->discovery.state != DISCOVERY_STOPPED)
5472 return;
5473
Andre Guedesa4790db2014-02-26 20:21:47 -03005474 hci_req_init(&req, hdev);
5475
Johan Hedbergd1d588c2014-07-20 17:10:45 +03005476 if (list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005477 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005478 /* If there is no pending LE connections or devices
5479 * to be scanned for, we should stop the background
5480 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005481 */
5482
5483 /* If controller is not scanning we are done. */
5484 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5485 return;
5486
5487 hci_req_add_le_scan_disable(&req);
5488
5489 BT_DBG("%s stopping background scanning", hdev->name);
5490 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005491 /* If there is at least one pending LE connection, we should
5492 * keep the background scan running.
5493 */
5494
Andre Guedesa4790db2014-02-26 20:21:47 -03005495 /* If controller is connecting, we should not start scanning
5496 * since some controllers are not able to scan and connect at
5497 * the same time.
5498 */
5499 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5500 if (conn)
5501 return;
5502
Andre Guedes4340a122014-03-10 18:26:24 -03005503 /* If controller is currently scanning, we stop it to ensure we
5504 * don't miss any advertising (due to duplicates filter).
5505 */
5506 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5507 hci_req_add_le_scan_disable(&req);
5508
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005509 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005510
5511 BT_DBG("%s starting background scanning", hdev->name);
5512 }
5513
5514 err = hci_req_run(&req, update_background_scan_complete);
5515 if (err)
5516 BT_ERR("Failed to run HCI request: err %d", err);
5517}