/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

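/* Usage note (not from the original source; the path is assumed): these
 * attributes are registered under the controller's debugfs directory,
 * so with debugfs mounted at /sys/kernel/debug this switch would appear
 * as e.g. /sys/kernel/debug/bluetooth/hci0/dut_mode and could be
 * toggled with "echo Y > dut_mode" while the device is up.
 */
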
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

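/* For illustration (the concrete value is an assumed example, not taken
 * from this file): if uuid->uuid[] holds the bytes fb 34 9b 5f 80 00 00
 * 80 00 10 00 00 00 18 00 00 (the reversed storage form of the GAP
 * service UUID), the loop above flips them so that %pUb prints
 * 00001800-0000-1000-8000-00805f9b34fb.
 */
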
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

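/* Note (assumption, not stated in the original file): the idle timeout
 * appears to be in milliseconds, so the range check above accepts
 * either 0 (disabled) or values from 500 ms up to one hour (3600000).
 */
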
static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

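/* Note (assumption based on the Bluetooth core specification, not
 * stated in this file): sniff intervals are expressed in baseband
 * slots of 0.625 ms, which is why the setters above reject odd values
 * and enforce min <= max.
 */
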
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

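/* Note (assumption, not stated in the original file): conn_info_min_age
 * and conn_info_max_age appear to bound, in milliseconds, how long
 * cached connection information such as RSSI and TX power is treated
 * as fresh before it must be queried from the controller again.
 */
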
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

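/* Note (assumption based on the Bluetooth core specification, not
 * stated in this file): LE connection interval values are in units of
 * 1.25 ms, so the range 0x0006-0x0c80 enforced above corresponds to
 * 7.5 ms up to 4 s.
 */
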
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

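/* Note (assumption based on the Bluetooth core specification, not
 * stated in this file): the LE supervision timeout is in units of
 * 10 ms, so the range 0x000a-0x0c80 checked above corresponds to
 * 100 ms up to 32 s.
 */
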
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

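/* Note (assumption based on the Bluetooth core specification, not
 * stated in this file): the advertising channel map is a three-bit
 * mask where bits 0, 1 and 2 select LE advertising channels 37, 38
 * and 39, so 0x07 enables all three and at least one bit must be set.
 */
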
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

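/* Illustrative usage sketch (not part of the original file): a caller
 * that wants to issue a single HCI command synchronously and inspect
 * the Command Complete parameters could do something like:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// ... parse skb->data ...
 *	kfree_skb(skb);
 *
 * The dut_mode_write() helper above follows exactly this pattern.
 */
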
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

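/* Note (assumption based on the HCI Write Inquiry Mode command, not
 * stated in this file): the return values map to standard inquiry
 * result (0x00), inquiry result with RSSI (0x01) and inquiry result
 * with RSSI or extended inquiry result (0x02). The hardcoded
 * manufacturer/revision checks appear to whitelist specific
 * controllers that handle RSSI reporting despite not advertising the
 * corresponding feature bit.
 */
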
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

Johan Hedbergd62e6d62013-09-13 11:40:02 +03001578static void hci_set_event_mask_page_2(struct hci_request *req)
1579{
1580 struct hci_dev *hdev = req->hdev;
1581 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1582
1583 /* If Connectionless Slave Broadcast master role is supported,

1584 * enable all necessary events for it.
1585 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001586 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001587 events[1] |= 0x40; /* Triggered Clock Capture */
1588 events[1] |= 0x80; /* Synchronization Train Complete */
1589 events[2] |= 0x10; /* Slave Page Response Timeout */
1590 events[2] |= 0x20; /* CSB Channel Map Change */
1591 }
1592
1593 /* If Connectionless Slave Broadcast slave role is supported,
1594 * enable all necessary events for it.
1595 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001596 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001597 events[2] |= 0x01; /* Synchronization Train Received */
1598 events[2] |= 0x02; /* CSB Receive */
1599 events[2] |= 0x04; /* CSB Timeout */
1600 events[2] |= 0x08; /* Truncated Page Complete */
1601 }
1602
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001603 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001604 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001605 events[2] |= 0x80;
1606
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001607 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1608}
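
/* Worked example: a controller that is CSB master capable and ping
 * capable, but not CSB slave capable, ends up with
 *
 *	events[1] = 0x40 | 0x80        = 0xc0
 *	events[2] = 0x10 | 0x20 | 0x80 = 0xb0
 *
 * and all other mask bytes left at 0x00.
 */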
1609
Johan Hedberg42c6b122013-03-05 20:37:49 +02001610static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001611{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001612 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001613 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001614
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001615 hci_setup_event_mask(req);
1616
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001617 /* Some Broadcom based Bluetooth controllers do not support the
1618 * Delete Stored Link Key command. They are clearly indicating its
1619 * absence in the bit mask of supported commands.
1620 *
1621 * Check the supported commands and send this command only if it
1622 * is marked as supported. If not supported, assume that the
1623 * controller does not have actual support for stored link keys,
1624 * which makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001625 *
1626 * Some controllers indicate that they support handling deleting
1627 * stored link keys, but they don't. The quirk lets a driver
1628 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001629 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001630 if (hdev->commands[6] & 0x80 &&
1631 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001632 struct hci_cp_delete_stored_link_key cp;
1633
1634 bacpy(&cp.bdaddr, BDADDR_ANY);
1635 cp.delete_all = 0x01;
1636 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1637 sizeof(cp), &cp);
1638 }
1639
Johan Hedberg2177bab2013-03-05 20:37:43 +02001640 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001641 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001642
Andre Guedes9193c6e2014-07-01 18:10:09 -03001643 if (lmp_le_capable(hdev)) {
1644 u8 events[8];
1645
1646 memset(events, 0, sizeof(events));
1647 events[0] = 0x1f;
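		/* 0x1f enables the first five LE meta events defined by
		 * the core specification: LE Connection Complete,
		 * LE Advertising Report, LE Connection Update Complete,
		 * LE Read Remote Used Features Complete and LE Long Term
		 * Key Request.
		 */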
Andre Guedes662bc2e2014-07-01 18:10:10 -03001648
1649 /* If controller supports the Connection Parameters Request
1650 * Link Layer Procedure, enable the corresponding event.
1651 */
1652 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1653 events[0] |= 0x20; /* LE Remote Connection
1654 * Parameter Request
1655 */
1656
Andre Guedes9193c6e2014-07-01 18:10:09 -03001657 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1658 events);
1659
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001660 if (hdev->commands[25] & 0x40) {
1661 /* Read LE Advertising Channel TX Power */
1662 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1663 }
1664
Johan Hedberg42c6b122013-03-05 20:37:49 +02001665 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001666 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001667
1668 /* Read features beyond page 1 if available */
1669 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1670 struct hci_cp_read_local_ext_features cp;
1671
1672 cp.page = p;
1673 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1674 sizeof(cp), &cp);
1675 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001676}
1677
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001678static void hci_init4_req(struct hci_request *req, unsigned long opt)
1679{
1680 struct hci_dev *hdev = req->hdev;
1681
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001682 /* Set event mask page 2 if the HCI command for it is supported */
1683 if (hdev->commands[22] & 0x04)
1684 hci_set_event_mask_page_2(req);
1685
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001686 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001687 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001688 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001689
1690 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001691 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001692 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001693 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1694 u8 support = 0x01;
1695 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1696 sizeof(support), &support);
1697 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001698}
1699
Johan Hedberg2177bab2013-03-05 20:37:43 +02001700static int __hci_init(struct hci_dev *hdev)
1701{
1702 int err;
1703
1704 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1705 if (err < 0)
1706 return err;
1707
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001708 /* The Device Under Test (DUT) mode is special and available for
1709 * all controller types. So just create it early on.
1710 */
1711 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1712 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1713 &dut_mode_fops);
1714 }
1715
Johan Hedberg2177bab2013-03-05 20:37:43 +02001716 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1717 * BR/EDR/LE type controllers. AMP controllers only need the
1718 * first stage init.
1719 */
1720 if (hdev->dev_type != HCI_BREDR)
1721 return 0;
1722
1723 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1724 if (err < 0)
1725 return err;
1726
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001727 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1728 if (err < 0)
1729 return err;
1730
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001731 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1732 if (err < 0)
1733 return err;
1734
1735 /* Only create debugfs entries during the initial setup
1736 * phase and not every time the controller gets powered on.
1737 */
1738 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1739 return 0;
1740
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001741 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1742 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001743 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1744 &hdev->manufacturer);
1745 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1746 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001747 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1748 &blacklist_fops);
Johan Hedberg66593582014-07-09 12:59:14 +03001749 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1750 &whitelist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001751 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1752
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001753 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1754 &conn_info_min_age_fops);
1755 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1756 &conn_info_max_age_fops);
1757
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001758 if (lmp_bredr_capable(hdev)) {
1759 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1760 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001761 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1762 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001763 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1764 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001765 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1766 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001767 }
1768
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001769 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001770 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1771 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001772 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1773 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001774 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1775 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001776 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001777
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001778 if (lmp_sniff_capable(hdev)) {
1779 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1780 hdev, &idle_timeout_fops);
1781 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1782 hdev, &sniff_min_interval_fops);
1783 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1784 hdev, &sniff_max_interval_fops);
1785 }
1786
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001787 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001788 debugfs_create_file("identity", 0400, hdev->debugfs,
1789 hdev, &identity_fops);
1790 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1791 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001792 debugfs_create_file("random_address", 0444, hdev->debugfs,
1793 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001794 debugfs_create_file("static_address", 0444, hdev->debugfs,
1795 hdev, &static_address_fops);
1796
1797 /* For controllers with a public address, provide a debug
1798 * option to force the usage of the configured static
1799 * address. By default the public address is used.
1800 */
1801 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1802 debugfs_create_file("force_static_address", 0644,
1803 hdev->debugfs, hdev,
1804 &force_static_address_fops);
1805
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001806 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1807 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001808 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1809 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001810 debugfs_create_file("identity_resolving_keys", 0400,
1811 hdev->debugfs, hdev,
1812 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001813 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1814 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001815 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1816 hdev, &conn_min_interval_fops);
1817 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1818 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001819 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1820 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001821 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1822 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001823 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1824 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001825 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1826 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001827 debugfs_create_u16("discov_interleaved_timeout", 0644,
1828 hdev->debugfs,
1829 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001830 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001831
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001832 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001833}
1834
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001835static void hci_init0_req(struct hci_request *req, unsigned long opt)
1836{
1837 struct hci_dev *hdev = req->hdev;
1838
1839 BT_DBG("%s %ld", hdev->name, opt);
1840
1841 /* Reset */
1842 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1843 hci_reset_req(req, 0);
1844
1845 /* Read Local Version */
1846 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1847
1848 /* Read BD Address */
1849 if (hdev->set_bdaddr)
1850 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1851}
1852
1853static int __hci_unconf_init(struct hci_dev *hdev)
1854{
1855 int err;
1856
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001857 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1858 return 0;
1859
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001860 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1861 if (err < 0)
1862 return err;
1863
1864 return 0;
1865}
1866
Johan Hedberg42c6b122013-03-05 20:37:49 +02001867static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868{
1869 __u8 scan = opt;
1870
Johan Hedberg42c6b122013-03-05 20:37:49 +02001871 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872
1873 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001874 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875}
1876
Johan Hedberg42c6b122013-03-05 20:37:49 +02001877static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878{
1879 __u8 auth = opt;
1880
Johan Hedberg42c6b122013-03-05 20:37:49 +02001881 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
1883 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001884 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885}
1886
Johan Hedberg42c6b122013-03-05 20:37:49 +02001887static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888{
1889 __u8 encrypt = opt;
1890
Johan Hedberg42c6b122013-03-05 20:37:49 +02001891 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001893 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001894 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895}
1896
Johan Hedberg42c6b122013-03-05 20:37:49 +02001897static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001898{
1899 __le16 policy = cpu_to_le16(opt);
1900
Johan Hedberg42c6b122013-03-05 20:37:49 +02001901 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001902
1903 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001904 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001905}
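
/* A minimal sketch, not part of the original file, of how the request
 * builders above are driven: the ioctl handlers pass the builder and
 * its opt value to the file-local hci_req_sync() helper, which runs
 * the request synchronously.  The function name below is illustrative;
 * SCAN_INQUIRY and SCAN_PAGE come from hci.h.
 */
static int example_enable_both_scans(struct hci_dev *hdev)
{
	/* opt is forwarded to hci_scan_req() as the scan enable value */
	return hci_req_sync(hdev, hci_scan_req, SCAN_INQUIRY | SCAN_PAGE,
			    HCI_INIT_TIMEOUT);
}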
1906
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001907/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 * Device is held on return. */
1909struct hci_dev *hci_dev_get(int index)
1910{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001911 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
1913 BT_DBG("%d", index);
1914
1915 if (index < 0)
1916 return NULL;
1917
1918 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001919 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 if (d->id == index) {
1921 hdev = hci_dev_hold(d);
1922 break;
1923 }
1924 }
1925 read_unlock(&hci_dev_list_lock);
1926 return hdev;
1927}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928
1929/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001930
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001931bool hci_discovery_active(struct hci_dev *hdev)
1932{
1933 struct discovery_state *discov = &hdev->discovery;
1934
Andre Guedes6fbe1952012-02-03 17:47:58 -03001935 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001936 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001937 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001938 return true;
1939
Andre Guedes6fbe1952012-02-03 17:47:58 -03001940 default:
1941 return false;
1942 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001943}
1944
Johan Hedbergff9ef572012-01-04 14:23:45 +02001945void hci_discovery_set_state(struct hci_dev *hdev, int state)
1946{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001947 int old_state = hdev->discovery.state;
1948
Johan Hedbergff9ef572012-01-04 14:23:45 +02001949 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1950
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001951 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001952 return;
1953
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001954 hdev->discovery.state = state;
1955
Johan Hedbergff9ef572012-01-04 14:23:45 +02001956 switch (state) {
1957 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001958 hci_update_background_scan(hdev);
1959
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001960 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001961 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001962 break;
1963 case DISCOVERY_STARTING:
1964 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001965 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001966 mgmt_discovering(hdev, 1);
1967 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001968 case DISCOVERY_RESOLVING:
1969 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001970 case DISCOVERY_STOPPING:
1971 break;
1972 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001973}
1974
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001975void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976{
Johan Hedberg30883512012-01-04 14:16:21 +02001977 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001978 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
Johan Hedberg561aafb2012-01-04 13:31:59 +02001980 list_for_each_entry_safe(p, n, &cache->all, all) {
1981 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001982 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001984
1985 INIT_LIST_HEAD(&cache->unknown);
1986 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987}
1988
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001989struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1990 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991{
Johan Hedberg30883512012-01-04 14:16:21 +02001992 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 struct inquiry_entry *e;
1994
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001995 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
Johan Hedberg561aafb2012-01-04 13:31:59 +02001997 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001999 return e;
2000 }
2001
2002 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003}
2004
Johan Hedberg561aafb2012-01-04 13:31:59 +02002005struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002006 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002007{
Johan Hedberg30883512012-01-04 14:16:21 +02002008 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002009 struct inquiry_entry *e;
2010
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002011 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002012
2013 list_for_each_entry(e, &cache->unknown, list) {
2014 if (!bacmp(&e->data.bdaddr, bdaddr))
2015 return e;
2016 }
2017
2018 return NULL;
2019}
2020
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002021struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002022 bdaddr_t *bdaddr,
2023 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002024{
2025 struct discovery_state *cache = &hdev->discovery;
2026 struct inquiry_entry *e;
2027
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002028 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002029
2030 list_for_each_entry(e, &cache->resolve, list) {
2031 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2032 return e;
2033 if (!bacmp(&e->data.bdaddr, bdaddr))
2034 return e;
2035 }
2036
2037 return NULL;
2038}
2039
Johan Hedberga3d4e202012-01-09 00:53:02 +02002040void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002041 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002042{
2043 struct discovery_state *cache = &hdev->discovery;
2044 struct list_head *pos = &cache->resolve;
2045 struct inquiry_entry *p;
2046
2047 list_del(&ie->list);
2048
2049 list_for_each_entry(p, &cache->resolve, list) {
2050 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002051 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002052 break;
2053 pos = &p->list;
2054 }
2055
2056 list_add(&ie->list, pos);
2057}
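
/* Worked example of the ordering above, ignoring the NAME_PENDING
 * special case: inquiry RSSI values are negative dBm figures, so
 * sorting by ascending abs(rssi) keeps the strongest responses first.
 * In a resolve list holding entries at -40, -60 and -75 dBm, an entry
 * re-inserted at -55 dBm lands between -40 and -60.
 */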
2058
Marcel Holtmannaf589252014-07-01 14:11:20 +02002059u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2060 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061{
Johan Hedberg30883512012-01-04 14:16:21 +02002062 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002063 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002064 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002066 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
Szymon Janc2b2fec42012-11-20 11:38:54 +01002068 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2069
Marcel Holtmannaf589252014-07-01 14:11:20 +02002070 if (!data->ssp_mode)
2071 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002072
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002073 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002074 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002075 if (!ie->data.ssp_mode)
2076 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002077
Johan Hedberga3d4e202012-01-09 00:53:02 +02002078 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002079 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002080 ie->data.rssi = data->rssi;
2081 hci_inquiry_cache_update_resolve(hdev, ie);
2082 }
2083
Johan Hedberg561aafb2012-01-04 13:31:59 +02002084 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002085 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002086
Johan Hedberg561aafb2012-01-04 13:31:59 +02002087 /* Entry not in the cache. Add new one. */
2088 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002089 if (!ie) {
2090 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2091 goto done;
2092 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002093
2094 list_add(&ie->all, &cache->all);
2095
2096 if (name_known) {
2097 ie->name_state = NAME_KNOWN;
2098 } else {
2099 ie->name_state = NAME_NOT_KNOWN;
2100 list_add(&ie->list, &cache->unknown);
2101 }
2102
2103update:
2104 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002105 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002106 ie->name_state = NAME_KNOWN;
2107 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 }
2109
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002110 memcpy(&ie->data, data, sizeof(*data));
2111 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002113
2114 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002115 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002116
Marcel Holtmannaf589252014-07-01 14:11:20 +02002117done:
2118 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119}
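
/* The returned flags word is consumed by the inquiry-result event
 * handlers in hci_event.c, which forward it to mgmt_device_found() so
 * that userspace learns whether name resolution is still needed
 * (MGMT_DEV_FOUND_CONFIRM_NAME) or whether only legacy pairing is
 * possible (MGMT_DEV_FOUND_LEGACY_PAIRING).
 */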
2120
2121static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2122{
Johan Hedberg30883512012-01-04 14:16:21 +02002123 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 struct inquiry_info *info = (struct inquiry_info *) buf;
2125 struct inquiry_entry *e;
2126 int copied = 0;
2127
Johan Hedberg561aafb2012-01-04 13:31:59 +02002128 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002130
2131 if (copied >= num)
2132 break;
2133
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 bacpy(&info->bdaddr, &data->bdaddr);
2135 info->pscan_rep_mode = data->pscan_rep_mode;
2136 info->pscan_period_mode = data->pscan_period_mode;
2137 info->pscan_mode = data->pscan_mode;
2138 memcpy(info->dev_class, data->dev_class, 3);
2139 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002140
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002142 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 }
2144
2145 BT_DBG("cache %p, copied %d", cache, copied);
2146 return copied;
2147}
2148
Johan Hedberg42c6b122013-03-05 20:37:49 +02002149static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150{
2151 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002152 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 struct hci_cp_inquiry cp;
2154
2155 BT_DBG("%s", hdev->name);
2156
2157 if (test_bit(HCI_INQUIRY, &hdev->flags))
2158 return;
2159
2160 /* Start Inquiry */
2161 memcpy(&cp.lap, &ir->lap, 3);
2162 cp.length = ir->length;
2163 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002164 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165}
2166
Andre Guedes3e13fa12013-03-27 20:04:56 -03002167static int wait_inquiry(void *word)
2168{
2169 schedule();
2170 return signal_pending(current);
2171}
2172
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173int hci_inquiry(void __user *arg)
2174{
2175 __u8 __user *ptr = arg;
2176 struct hci_inquiry_req ir;
2177 struct hci_dev *hdev;
2178 int err = 0, do_inquiry = 0, max_rsp;
2179 long timeo;
2180 __u8 *buf;
2181
2182 if (copy_from_user(&ir, ptr, sizeof(ir)))
2183 return -EFAULT;
2184
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002185 hdev = hci_dev_get(ir.dev_id);
2186 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 return -ENODEV;
2188
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002189 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2190 err = -EBUSY;
2191 goto done;
2192 }
2193
Marcel Holtmann4a964402014-07-02 19:10:33 +02002194 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002195 err = -EOPNOTSUPP;
2196 goto done;
2197 }
2198
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002199 if (hdev->dev_type != HCI_BREDR) {
2200 err = -EOPNOTSUPP;
2201 goto done;
2202 }
2203
Johan Hedberg56f87902013-10-02 13:43:13 +03002204 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2205 err = -EOPNOTSUPP;
2206 goto done;
2207 }
2208
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002209 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002210 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002211 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002212 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213 do_inquiry = 1;
2214 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002215 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
Marcel Holtmann04837f62006-07-03 10:02:33 +02002217 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002218
2219 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002220 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2221 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002222 if (err < 0)
2223 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002224
2225 /* Wait until the Inquiry procedure finishes (HCI_INQUIRY flag is
2226 * cleared). If it is interrupted by a signal, return -EINTR.
2227 */
2228 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2229 TASK_INTERRUPTIBLE))
2230 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002231 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002233 /* For an unlimited number of responses we use a buffer with
2234 * 255 entries
2235 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2237
2238 /* cache_dump can't sleep. Therefore we allocate a temp buffer and
2239 * then copy it to user space.
2240 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002241 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002242 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 err = -ENOMEM;
2244 goto done;
2245 }
2246
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002247 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002249 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
2251 BT_DBG("num_rsp %d", ir.num_rsp);
2252
2253 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2254 ptr += sizeof(ir);
2255 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002256 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002258 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 err = -EFAULT;
2260
2261 kfree(buf);
2262
2263done:
2264 hci_dev_put(hdev);
2265 return err;
2266}
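
/* A minimal userspace sketch, assuming the libbluetooth headers that
 * expose HCIINQUIRY, struct hci_inquiry_req and inquiry_info: the
 * request header is immediately followed in memory by room for the
 * responses, matching the copy_to_user() layout above.
 *
 *	#include <sys/ioctl.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = {
 *		.dev_id  = 0,
 *		.flags   = IREQ_CACHE_FLUSH,
 *		.lap     = { 0x33, 0x8b, 0x9e },	(general inquiry LAP)
 *		.length  = 8,				(8 * 1.28 s)
 *		.num_rsp = 255,
 *	} };
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */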
2267
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002268static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 int ret = 0;
2271
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 BT_DBG("%s %p", hdev->name, hdev);
2273
2274 hci_req_lock(hdev);
2275
Johan Hovold94324962012-03-15 14:48:41 +01002276 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2277 ret = -ENODEV;
2278 goto done;
2279 }
2280
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002281 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2282 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002283 /* Check for rfkill but allow the HCI setup stage to
2284 * proceed (which in itself doesn't cause any RF activity).
2285 */
2286 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2287 ret = -ERFKILL;
2288 goto done;
2289 }
2290
2291 /* Check for valid public address or a configured static
2292 * random address, but let the HCI setup proceed to
2293 * be able to determine if there is a public address
2294 * or not.
2295 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002296 * In case of user channel usage, it is not important
2297 * if a public address or static random address is
2298 * available.
2299 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002300 * This check is only valid for BR/EDR controllers
2301 * since AMP controllers do not have an address.
2302 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002303 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2304 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002305 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2306 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2307 ret = -EADDRNOTAVAIL;
2308 goto done;
2309 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002310 }
2311
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 if (test_bit(HCI_UP, &hdev->flags)) {
2313 ret = -EALREADY;
2314 goto done;
2315 }
2316
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 if (hdev->open(hdev)) {
2318 ret = -EIO;
2319 goto done;
2320 }
2321
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002322 atomic_set(&hdev->cmd_cnt, 1);
2323 set_bit(HCI_INIT, &hdev->flags);
2324
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002325 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2326 if (hdev->setup)
2327 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002328
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002329 /* The transport driver can set these quirks before
2330 * creating the HCI device or in its setup callback.
2331 *
2332 * In case any of them is set, the controller has to
2333 * start up as unconfigured.
2334 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002335 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2336 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002337 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002338
2339 /* For an unconfigured controller it is required to
2340 * read at least the version information provided by
2341 * the Read Local Version Information command.
2342 *
2343 * If the set_bdaddr driver callback is provided, then
2344 * also the original Bluetooth public device address
2345 * will be read using the Read BD Address command.
2346 */
2347 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2348 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002349 }
2350
Marcel Holtmann9713c172014-07-06 12:11:15 +02002351 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2352 /* If public address change is configured, ensure that
2353 * the address gets programmed. If the driver does not
2354 * support changing the public address, fail the power
2355 * on procedure.
2356 */
2357 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2358 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002359 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2360 else
2361 ret = -EADDRNOTAVAIL;
2362 }
2363
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002364 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002365 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002366 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002367 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 }
2369
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002370 clear_bit(HCI_INIT, &hdev->flags);
2371
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 if (!ret) {
2373 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002374 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 set_bit(HCI_UP, &hdev->flags);
2376 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002377 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002378 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002379 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002380 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002381 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002382 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002383 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002384 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002385 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002386 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002388 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002389 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002390 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391
2392 skb_queue_purge(&hdev->cmd_q);
2393 skb_queue_purge(&hdev->rx_q);
2394
2395 if (hdev->flush)
2396 hdev->flush(hdev);
2397
2398 if (hdev->sent_cmd) {
2399 kfree_skb(hdev->sent_cmd);
2400 hdev->sent_cmd = NULL;
2401 }
2402
2403 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002404 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 }
2406
2407done:
2408 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 return ret;
2410}
2411
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002412/* ---- HCI ioctl helpers ---- */
2413
2414int hci_dev_open(__u16 dev)
2415{
2416 struct hci_dev *hdev;
2417 int err;
2418
2419 hdev = hci_dev_get(dev);
2420 if (!hdev)
2421 return -ENODEV;
2422
Marcel Holtmann4a964402014-07-02 19:10:33 +02002423 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002424 * up as user channel. Trying to bring them up as normal devices
2425 * will result in a failure. Only user channel operation is
2426 * possible.
2427 *
2428 * When this function is called for a user channel, the flag
2429 * HCI_USER_CHANNEL will be set first before attempting to
2430 * open the device.
2431 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002432 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002433 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2434 err = -EOPNOTSUPP;
2435 goto done;
2436 }
2437
Johan Hedberge1d08f42013-10-01 22:44:50 +03002438 /* We need to ensure that no other power on/off work is pending
2439 * before proceeding to call hci_dev_do_open. This is
2440 * particularly important if the setup procedure has not yet
2441 * completed.
2442 */
2443 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2444 cancel_delayed_work(&hdev->power_off);
2445
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002446 /* After this call it is guaranteed that the setup procedure
2447 * has finished. This means that error conditions like RFKILL
2448 * or no valid public or static random address apply.
2449 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002450 flush_workqueue(hdev->req_workqueue);
2451
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002452 /* For controllers that do not use the management interface and
2453 * are brought up using the legacy ioctl, set the HCI_PAIRABLE bit
2454 * so that pairing works for them. Once the management interface
2455 * is in use this bit will be cleared again and userspace has
2456 * to explicitly enable it.
2457 */
2458 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2459 !test_bit(HCI_MGMT, &hdev->dev_flags))
2460 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2461
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002462 err = hci_dev_do_open(hdev);
2463
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002464done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002465 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002466 return err;
2467}
2468
Johan Hedbergd7347f32014-07-04 12:37:23 +03002469/* This function requires the caller holds hdev->lock */
2470static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2471{
2472 struct hci_conn_params *p;
2473
2474 list_for_each_entry(p, &hdev->le_conn_params, list)
2475 list_del_init(&p->action);
2476
2477 BT_DBG("All LE pending actions cleared");
2478}
2479
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480static int hci_dev_do_close(struct hci_dev *hdev)
2481{
2482 BT_DBG("%s %p", hdev->name, hdev);
2483
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002484 cancel_delayed_work(&hdev->power_off);
2485
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 hci_req_cancel(hdev, ENODEV);
2487 hci_req_lock(hdev);
2488
2489 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002490 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 hci_req_unlock(hdev);
2492 return 0;
2493 }
2494
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002495 /* Flush RX and TX works */
2496 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002497 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002499 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002500 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002501 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002502 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002503 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002504 }
2505
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002506 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002507 cancel_delayed_work(&hdev->service_cache);
2508
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002509 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002510
2511 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2512 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002513
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002514 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002515 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 hci_conn_hash_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002517 hci_pend_le_actions_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002518 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519
2520 hci_notify(hdev, HCI_DEV_DOWN);
2521
2522 if (hdev->flush)
2523 hdev->flush(hdev);
2524
2525 /* Reset device */
2526 skb_queue_purge(&hdev->cmd_q);
2527 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002528 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2529 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002530 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002532 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 clear_bit(HCI_INIT, &hdev->flags);
2534 }
2535
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002536 /* flush cmd work */
2537 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538
2539 /* Drop queues */
2540 skb_queue_purge(&hdev->rx_q);
2541 skb_queue_purge(&hdev->cmd_q);
2542 skb_queue_purge(&hdev->raw_q);
2543
2544 /* Drop last sent command */
2545 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002546 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 kfree_skb(hdev->sent_cmd);
2548 hdev->sent_cmd = NULL;
2549 }
2550
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002551 kfree_skb(hdev->recv_evt);
2552 hdev->recv_evt = NULL;
2553
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 /* After this point our queues are empty
2555 * and no tasks are scheduled. */
2556 hdev->close(hdev);
2557
Johan Hedberg35b973c2013-03-15 17:06:59 -05002558 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002559 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002560 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2561
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002562 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2563 if (hdev->dev_type == HCI_BREDR) {
2564 hci_dev_lock(hdev);
2565 mgmt_powered(hdev, 0);
2566 hci_dev_unlock(hdev);
2567 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002568 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002569
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002570 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002571 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002572
Johan Hedberge59fda82012-02-22 18:11:53 +02002573 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002574 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002575 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002576
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 hci_req_unlock(hdev);
2578
2579 hci_dev_put(hdev);
2580 return 0;
2581}
2582
2583int hci_dev_close(__u16 dev)
2584{
2585 struct hci_dev *hdev;
2586 int err;
2587
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002588 hdev = hci_dev_get(dev);
2589 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002591
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002592 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2593 err = -EBUSY;
2594 goto done;
2595 }
2596
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002597 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2598 cancel_delayed_work(&hdev->power_off);
2599
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002601
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002602done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 hci_dev_put(hdev);
2604 return err;
2605}
2606
2607int hci_dev_reset(__u16 dev)
2608{
2609 struct hci_dev *hdev;
2610 int ret = 0;
2611
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002612 hdev = hci_dev_get(dev);
2613 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 return -ENODEV;
2615
2616 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617
Marcel Holtmann808a0492013-08-26 20:57:58 -07002618 if (!test_bit(HCI_UP, &hdev->flags)) {
2619 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002621 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002623 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2624 ret = -EBUSY;
2625 goto done;
2626 }
2627
Marcel Holtmann4a964402014-07-02 19:10:33 +02002628 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002629 ret = -EOPNOTSUPP;
2630 goto done;
2631 }
2632
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 /* Drop queues */
2634 skb_queue_purge(&hdev->rx_q);
2635 skb_queue_purge(&hdev->cmd_q);
2636
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002637 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002638 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002640 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641
2642 if (hdev->flush)
2643 hdev->flush(hdev);
2644
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002645 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002646 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002648 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649
2650done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651 hci_req_unlock(hdev);
2652 hci_dev_put(hdev);
2653 return ret;
2654}
2655
2656int hci_dev_reset_stat(__u16 dev)
2657{
2658 struct hci_dev *hdev;
2659 int ret = 0;
2660
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002661 hdev = hci_dev_get(dev);
2662 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663 return -ENODEV;
2664
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002665 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2666 ret = -EBUSY;
2667 goto done;
2668 }
2669
Marcel Holtmann4a964402014-07-02 19:10:33 +02002670 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002671 ret = -EOPNOTSUPP;
2672 goto done;
2673 }
2674
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2676
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002677done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 return ret;
2680}
2681
Johan Hedberg123abc02014-07-10 12:09:07 +03002682static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2683{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002684 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002685
2686 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2687
2688 if ((scan & SCAN_PAGE))
2689 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2690 &hdev->dev_flags);
2691 else
2692 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2693 &hdev->dev_flags);
2694
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002695 if ((scan & SCAN_INQUIRY)) {
2696 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2697 &hdev->dev_flags);
2698 } else {
2699 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2700 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2701 &hdev->dev_flags);
2702 }
2703
Johan Hedberg123abc02014-07-10 12:09:07 +03002704 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2705 return;
2706
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002707 if (conn_changed || discov_changed) {
2708 /* In case this was disabled through mgmt */
2709 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2710
2711 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2712 mgmt_update_adv_data(hdev);
2713
Johan Hedberg123abc02014-07-10 12:09:07 +03002714 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002715 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002716}
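
/* Worked example, using the SCAN_* values from hci.h: a legacy
 * HCISETSCAN ioctl with dev_opt = SCAN_PAGE (0x02) sets
 * HCI_CONNECTABLE and clears HCI_DISCOVERABLE, while
 * dev_opt = SCAN_PAGE | SCAN_INQUIRY (0x03) sets both.  If either
 * flag actually changed and mgmt is in use, mgmt_new_settings()
 * notifies userspace.
 */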
2717
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718int hci_dev_cmd(unsigned int cmd, void __user *arg)
2719{
2720 struct hci_dev *hdev;
2721 struct hci_dev_req dr;
2722 int err = 0;
2723
2724 if (copy_from_user(&dr, arg, sizeof(dr)))
2725 return -EFAULT;
2726
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002727 hdev = hci_dev_get(dr.dev_id);
2728 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 return -ENODEV;
2730
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002731 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2732 err = -EBUSY;
2733 goto done;
2734 }
2735
Marcel Holtmann4a964402014-07-02 19:10:33 +02002736 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002737 err = -EOPNOTSUPP;
2738 goto done;
2739 }
2740
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002741 if (hdev->dev_type != HCI_BREDR) {
2742 err = -EOPNOTSUPP;
2743 goto done;
2744 }
2745
Johan Hedberg56f87902013-10-02 13:43:13 +03002746 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2747 err = -EOPNOTSUPP;
2748 goto done;
2749 }
2750
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 switch (cmd) {
2752 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002753 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2754 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 break;
2756
2757 case HCISETENCRYPT:
2758 if (!lmp_encrypt_capable(hdev)) {
2759 err = -EOPNOTSUPP;
2760 break;
2761 }
2762
2763 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2764 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002765 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2766 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 if (err)
2768 break;
2769 }
2770
Johan Hedberg01178cd2013-03-05 20:37:41 +02002771 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2772 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 break;
2774
2775 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002776 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2777 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002778
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002779 /* Ensure that the connectable and discoverable states
2780 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002781 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002782 if (!err)
2783 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 break;
2785
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002786 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002787 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2788 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002789 break;
2790
2791 case HCISETLINKMODE:
2792 hdev->link_mode = ((__u16) dr.dev_opt) &
2793 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2794 break;
2795
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 case HCISETPTYPE:
2797 hdev->pkt_type = (__u16) dr.dev_opt;
2798 break;
2799
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002801 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2802 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 break;
2804
2805 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002806 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2807 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 break;
2809
2810 default:
2811 err = -EINVAL;
2812 break;
2813 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002814
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002815done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816 hci_dev_put(hdev);
2817 return err;
2818}
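
/* Worked example of the HCISETACLMTU decoding above: dev_opt packs
 * two 16-bit halves that are read back as host-endian words, so on a
 * little-endian machine
 *
 *	dev_opt = (acl_mtu << 16) | acl_pkts
 *
 * and dev_opt = 0x03fd0008 yields acl_mtu = 1021 and acl_pkts = 8.
 */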
2819
2820int hci_get_dev_list(void __user *arg)
2821{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002822 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 struct hci_dev_list_req *dl;
2824 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 int n = 0, size, err;
2826 __u16 dev_num;
2827
2828 if (get_user(dev_num, (__u16 __user *) arg))
2829 return -EFAULT;
2830
2831 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2832 return -EINVAL;
2833
2834 size = sizeof(*dl) + dev_num * sizeof(*dr);
2835
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002836 dl = kzalloc(size, GFP_KERNEL);
2837 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 return -ENOMEM;
2839
2840 dr = dl->dev_req;
2841
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002842 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002843 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002844 unsigned long flags = hdev->flags;
2845
2846 /* When the auto-off is configured, it means the transport
2847 * is running, but in that case still indicate that the
2848 * device is actually down.
2849 */
2850 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2851 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002852
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002854 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002855
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 if (++n >= dev_num)
2857 break;
2858 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002859 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860
2861 dl->dev_num = n;
2862 size = sizeof(*dl) + n * sizeof(*dr);
2863
2864 err = copy_to_user(arg, dl, size);
2865 kfree(dl);
2866
2867 return err ? -EFAULT : 0;
2868}
2869
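/* A matching userspace sketch (again hypothetical, assuming the BlueZ
 * <bluetooth/hci.h> definitions) for HCIGETDEVLIST; note how dev_opt
 * carries the flags word that is filled in above:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	static void list_devices(void)
 *	{
 *		struct hci_dev_list_req *dl;
 *		struct hci_dev_req *dr;
 *		int i, ctl;
 *
 *		ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *		if (ctl < 0)
 *			return;
 *
 *		dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *		if (!dl) {
 *			close(ctl);
 *			return;
 *		}
 *
 *		dl->dev_num = HCI_MAX_DEV;
 *		dr = dl->dev_req;
 *
 *		if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) == 0) {
 *			for (i = 0; i < dl->dev_num; i++)
 *				printf("hci%u %s\n", dr[i].dev_id,
 *				       (dr[i].dev_opt & (1 << HCI_UP)) ?
 *				       "up" : "down");
 *		}
 *
 *		free(dl);
 *		close(ctl);
 *	}
 */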
2870int hci_get_dev_info(void __user *arg)
2871{
2872 struct hci_dev *hdev;
2873 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002874 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875 int err = 0;
2876
2877 if (copy_from_user(&di, arg, sizeof(di)))
2878 return -EFAULT;
2879
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002880 hdev = hci_dev_get(di.dev_id);
2881 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882 return -ENODEV;
2883
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002884	/* When auto-off is configured, the transport is actually
2885	 * running, but in that case still indicate that the
2886	 * device is down.
2887 */
2888 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2889 flags = hdev->flags & ~BIT(HCI_UP);
2890 else
2891 flags = hdev->flags;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002892
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 strcpy(di.name, hdev->name);
2894 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002895 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002896 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002898 if (lmp_bredr_capable(hdev)) {
2899 di.acl_mtu = hdev->acl_mtu;
2900 di.acl_pkts = hdev->acl_pkts;
2901 di.sco_mtu = hdev->sco_mtu;
2902 di.sco_pkts = hdev->sco_pkts;
2903 } else {
2904 di.acl_mtu = hdev->le_mtu;
2905 di.acl_pkts = hdev->le_pkts;
2906 di.sco_mtu = 0;
2907 di.sco_pkts = 0;
2908 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 di.link_policy = hdev->link_policy;
2910 di.link_mode = hdev->link_mode;
2911
2912 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2913 memcpy(&di.features, &hdev->features, sizeof(di.features));
2914
2915 if (copy_to_user(arg, &di, sizeof(di)))
2916 err = -EFAULT;
2917
2918 hci_dev_put(hdev);
2919
2920 return err;
2921}
2922
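/* Note the packing of di.type above: the transport bus sits in the low
 * nibble and the controller type (HCI_BREDR vs HCI_AMP) in the next two
 * bits, so userspace decodes it as (sketch):
 *
 *	bus  = di.type & 0x0f;
 *	type = (di.type >> 4) & 0x03;
 */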
2923/* ---- Interface to HCI drivers ---- */
2924
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002925static int hci_rfkill_set_block(void *data, bool blocked)
2926{
2927 struct hci_dev *hdev = data;
2928
2929 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2930
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002931 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2932 return -EBUSY;
2933
Johan Hedberg5e130362013-09-13 08:58:17 +03002934 if (blocked) {
2935 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002936 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2937 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002938 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002939 } else {
2940 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002941 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002942
2943 return 0;
2944}
2945
2946static const struct rfkill_ops hci_rfkill_ops = {
2947 .set_block = hci_rfkill_set_block,
2948};
2949
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002950static void hci_power_on(struct work_struct *work)
2951{
2952 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002953 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002954
2955 BT_DBG("%s", hdev->name);
2956
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002957 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002958 if (err < 0) {
2959 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002960 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002961 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002962
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002963	/* During the HCI setup phase, a few error conditions are
2964	 * ignored and need to be checked now. If any of them still
2965	 * holds, it is important to turn the device back off.
2966 */
2967 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002968 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002969 (hdev->dev_type == HCI_BREDR &&
2970 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2971 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002972 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2973 hci_dev_do_close(hdev);
2974 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002975 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2976 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002977 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002978
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002979 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002980 /* For unconfigured devices, set the HCI_RAW flag
2981 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002982 */
2983 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2984 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002985
2986 /* For fully configured devices, this will send
2987 * the Index Added event. For unconfigured devices,
2988	 * it will send the Unconfigured Index Added event.
2989 *
2990 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2991	 * and no event will be sent.
2992 */
2993 mgmt_index_added(hdev);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002994 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002995		/* Now that the controller is configured, it is
2996		 * important to clear the HCI_RAW flag.
2997 */
2998 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2999 clear_bit(HCI_RAW, &hdev->flags);
3000
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003001 /* Powering on the controller with HCI_CONFIG set only
3002 * happens with the transition from unconfigured to
3003 * configured. This will send the Index Added event.
3004 */
3005 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003006 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003007}
3008
3009static void hci_power_off(struct work_struct *work)
3010{
Johan Hedberg32435532011-11-07 22:16:04 +02003011 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003012 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003013
3014 BT_DBG("%s", hdev->name);
3015
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003016 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003017}
3018
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003019static void hci_discov_off(struct work_struct *work)
3020{
3021 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003022
3023 hdev = container_of(work, struct hci_dev, discov_off.work);
3024
3025 BT_DBG("%s", hdev->name);
3026
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003027 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003028}
3029
Johan Hedberg35f74982014-02-18 17:14:32 +02003030void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003031{
Johan Hedberg48210022013-01-27 00:31:28 +02003032 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003033
Johan Hedberg48210022013-01-27 00:31:28 +02003034 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3035 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003036 kfree(uuid);
3037 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003038}
3039
Johan Hedberg35f74982014-02-18 17:14:32 +02003040void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003041{
3042 struct list_head *p, *n;
3043
3044 list_for_each_safe(p, n, &hdev->link_keys) {
3045 struct link_key *key;
3046
3047 key = list_entry(p, struct link_key, list);
3048
3049 list_del(p);
3050 kfree(key);
3051 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003052}
3053
Johan Hedberg35f74982014-02-18 17:14:32 +02003054void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003055{
3056 struct smp_ltk *k, *tmp;
3057
3058 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3059 list_del(&k->list);
3060 kfree(k);
3061 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003062}
3063
Johan Hedberg970c4e42014-02-18 10:19:33 +02003064void hci_smp_irks_clear(struct hci_dev *hdev)
3065{
3066 struct smp_irk *k, *tmp;
3067
3068 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3069 list_del(&k->list);
3070 kfree(k);
3071 }
3072}
3073
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003074struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3075{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003076 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003077
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003078 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003079 if (bacmp(bdaddr, &k->bdaddr) == 0)
3080 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003081
3082 return NULL;
3083}
3084
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303085static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003086 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003087{
3088 /* Legacy key */
3089 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303090 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003091
3092 /* Debug keys are insecure so don't store them persistently */
3093 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303094 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003095
3096 /* Changed combination key and there's no previous one */
3097 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303098 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003099
3100 /* Security mode 3 case */
3101 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303102 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003103
3104	/* Neither the local nor the remote side requested no-bonding */
3105 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303106 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003107
3108 /* Local side had dedicated bonding as requirement */
3109 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303110 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003111
3112 /* Remote side had dedicated bonding as requirement */
3113 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303114 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003115
3116 /* If none of the above criteria match, then don't store the key
3117 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303118 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003119}
3120
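/* To summarize hci_persistent_key() above: legacy keys (type < 0x03)
 * are always stored, debug combination keys never are, and a changed
 * combination key without any previous key is treated as temporary.
 * Otherwise the key is kept when there is no connection at all
 * (security mode 3 pairing), when both sides requested some form of
 * bonding, or when either side used dedicated bonding (auth
 * requirement 0x02 or 0x03).
 */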
Johan Hedberg98a0b842014-01-30 19:40:00 -08003121static bool ltk_type_master(u8 type)
3122{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03003123 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08003124}
3125
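/* The SMP_LTK type marks the key that is used when the local device is
 * the master (initiator) of the encrypted connection; all other types
 * are used in the slave role. The lookups below filter on this role
 * bit in addition to the address or EDiv/Rand pair.
 */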
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003126struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003127 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003128{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003129 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003130
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003131 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003132 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003133 continue;
3134
Johan Hedberg98a0b842014-01-30 19:40:00 -08003135 if (ltk_type_master(k->type) != master)
3136 continue;
3137
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003138 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003139 }
3140
3141 return NULL;
3142}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003143
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003144struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003145 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003146{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003147 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003148
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003149 list_for_each_entry(k, &hdev->long_term_keys, list)
3150 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003151 bacmp(bdaddr, &k->bdaddr) == 0 &&
3152 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003153 return k;
3154
3155 return NULL;
3156}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003157
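/* A Resolvable Private Address consists of a 24-bit random part (top
 * two bits fixed to 01) and a 24-bit hash of that random part computed
 * with the IRK (the ah() function of the Core spec). The lookup below
 * therefore first tries the cached RPA of each IRK and only then falls
 * back to recomputing the hash via smp_irk_matches(), caching the RPA
 * on a match.
 */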
Johan Hedberg970c4e42014-02-18 10:19:33 +02003158struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3159{
3160 struct smp_irk *irk;
3161
3162 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3163 if (!bacmp(&irk->rpa, rpa))
3164 return irk;
3165 }
3166
3167 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3168 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3169 bacpy(&irk->rpa, rpa);
3170 return irk;
3171 }
3172 }
3173
3174 return NULL;
3175}
3176
3177struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3178 u8 addr_type)
3179{
3180 struct smp_irk *irk;
3181
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003182 /* Identity Address must be public or static random */
3183 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3184 return NULL;
3185
Johan Hedberg970c4e42014-02-18 10:19:33 +02003186 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3187 if (addr_type == irk->addr_type &&
3188 bacmp(bdaddr, &irk->bdaddr) == 0)
3189 return irk;
3190 }
3191
3192 return NULL;
3193}
3194
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003195struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003196 bdaddr_t *bdaddr, u8 *val, u8 type,
3197 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003198{
3199 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303200 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003201
3202 old_key = hci_find_link_key(hdev, bdaddr);
3203 if (old_key) {
3204 old_key_type = old_key->type;
3205 key = old_key;
3206 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003207 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003208 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003209 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003210 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003211 list_add(&key->list, &hdev->link_keys);
3212 }
3213
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003214 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003215
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003216 /* Some buggy controller combinations generate a changed
3217 * combination key for legacy pairing even when there's no
3218 * previous key */
3219 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003220 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003221 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003222 if (conn)
3223 conn->key_type = type;
3224 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003225
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003226 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003227 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003228 key->pin_len = pin_len;
3229
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003230 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003231 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003232 else
3233 key->type = type;
3234
Johan Hedberg7652ff62014-06-24 13:15:49 +03003235 if (persistent)
3236 *persistent = hci_persistent_key(hdev, conn, type,
3237 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003238
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003239 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003240}
3241
Johan Hedbergca9142b2014-02-19 14:57:44 +02003242struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003243 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003244 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003245{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003246 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003247 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003248
Johan Hedberg98a0b842014-01-30 19:40:00 -08003249 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003250 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003251 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003252 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003253 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003254 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003255 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003256 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003257 }
3258
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003259 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003260 key->bdaddr_type = addr_type;
3261 memcpy(key->val, tk, sizeof(key->val));
3262 key->authenticated = authenticated;
3263 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003264 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003265 key->enc_size = enc_size;
3266 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003267
Johan Hedbergca9142b2014-02-19 14:57:44 +02003268 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003269}
3270
Johan Hedbergca9142b2014-02-19 14:57:44 +02003271struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3272 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003273{
3274 struct smp_irk *irk;
3275
3276 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3277 if (!irk) {
3278 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3279 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003280 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003281
3282 bacpy(&irk->bdaddr, bdaddr);
3283 irk->addr_type = addr_type;
3284
3285 list_add(&irk->list, &hdev->identity_resolving_keys);
3286 }
3287
3288 memcpy(irk->val, val, 16);
3289 bacpy(&irk->rpa, rpa);
3290
Johan Hedbergca9142b2014-02-19 14:57:44 +02003291 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003292}
3293
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003294int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3295{
3296 struct link_key *key;
3297
3298 key = hci_find_link_key(hdev, bdaddr);
3299 if (!key)
3300 return -ENOENT;
3301
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003302 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003303
3304 list_del(&key->list);
3305 kfree(key);
3306
3307 return 0;
3308}
3309
Johan Hedberge0b2b272014-02-18 17:14:31 +02003310int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003311{
3312 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003313 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003314
3315 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003316 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003317 continue;
3318
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003319 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003320
3321 list_del(&k->list);
3322 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003323 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003324 }
3325
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003326 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003327}
3328
Johan Hedberga7ec7332014-02-18 17:14:35 +02003329void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3330{
3331 struct smp_irk *k, *tmp;
3332
Johan Hedberg668b7b12014-02-21 16:03:31 +02003333 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003334 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3335 continue;
3336
3337 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3338
3339 list_del(&k->list);
3340 kfree(k);
3341 }
3342}
3343
Ville Tervo6bd32322011-02-16 16:32:41 +02003344/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003345static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003346{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003347 struct hci_dev *hdev = container_of(work, struct hci_dev,
3348 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003349
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003350 if (hdev->sent_cmd) {
3351 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3352 u16 opcode = __le16_to_cpu(sent->opcode);
3353
3354 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3355 } else {
3356 BT_ERR("%s command tx timeout", hdev->name);
3357 }
3358
Ville Tervo6bd32322011-02-16 16:32:41 +02003359 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003360 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003361}
3362
Szymon Janc2763eda2011-03-22 13:12:22 +01003363struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003364 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003365{
3366 struct oob_data *data;
3367
3368 list_for_each_entry(data, &hdev->remote_oob_data, list)
3369 if (bacmp(bdaddr, &data->bdaddr) == 0)
3370 return data;
3371
3372 return NULL;
3373}
3374
3375int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3376{
3377 struct oob_data *data;
3378
3379 data = hci_find_remote_oob_data(hdev, bdaddr);
3380 if (!data)
3381 return -ENOENT;
3382
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003383 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003384
3385 list_del(&data->list);
3386 kfree(data);
3387
3388 return 0;
3389}
3390
Johan Hedberg35f74982014-02-18 17:14:32 +02003391void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003392{
3393 struct oob_data *data, *n;
3394
3395 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3396 list_del(&data->list);
3397 kfree(data);
3398 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003399}
3400
Marcel Holtmann07988722014-01-10 02:07:29 -08003401int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3402 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003403{
3404 struct oob_data *data;
3405
3406 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003407 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003408 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003409 if (!data)
3410 return -ENOMEM;
3411
3412 bacpy(&data->bdaddr, bdaddr);
3413 list_add(&data->list, &hdev->remote_oob_data);
3414 }
3415
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003416 memcpy(data->hash192, hash, sizeof(data->hash192));
3417 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003418
Marcel Holtmann07988722014-01-10 02:07:29 -08003419 memset(data->hash256, 0, sizeof(data->hash256));
3420 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3421
3422 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3423
3424 return 0;
3425}
3426
3427int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3428 u8 *hash192, u8 *randomizer192,
3429 u8 *hash256, u8 *randomizer256)
3430{
3431 struct oob_data *data;
3432
3433 data = hci_find_remote_oob_data(hdev, bdaddr);
3434 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003435 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003436 if (!data)
3437 return -ENOMEM;
3438
3439 bacpy(&data->bdaddr, bdaddr);
3440 list_add(&data->list, &hdev->remote_oob_data);
3441 }
3442
3443 memcpy(data->hash192, hash192, sizeof(data->hash192));
3444 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3445
3446 memcpy(data->hash256, hash256, sizeof(data->hash256));
3447 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3448
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003449 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003450
3451 return 0;
3452}
3453
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003454struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003455 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003456{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003457 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003458
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003459 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003460 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003461 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003462 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003463
3464 return NULL;
3465}
3466
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003467void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003468{
3469 struct list_head *p, *n;
3470
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003471 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003472 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003473
3474 list_del(p);
3475 kfree(b);
3476 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003477}
3478
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003479int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003480{
3481 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003482
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003483 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003484 return -EBADF;
3485
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003486 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003487 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003488
3489 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003490 if (!entry)
3491 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003492
3493 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003494 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003495
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003496 list_add(&entry->list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003497
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003498 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003499}
3500
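/* These list helpers back the per-controller blacklist, whitelist and
 * LE white list alike. A typical call pattern (sketch, with hdev->lock
 * held) would be:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (!err && hci_bdaddr_list_lookup(&hdev->whitelist, &bdaddr,
 *					   BDADDR_BREDR))
 *		... the entry is present ...
 */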
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003501int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003502{
3503 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003504
Johan Hedberg35f74982014-02-18 17:14:32 +02003505 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003506 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003507 return 0;
3508 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003509
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003510 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003511 if (!entry)
3512 return -ENOENT;
3513
3514 list_del(&entry->list);
3515 kfree(entry);
3516
3517 return 0;
3518}
3519
Andre Guedes15819a72014-02-03 13:56:18 -03003520/* This function requires the caller holds hdev->lock */
3521struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3522 bdaddr_t *addr, u8 addr_type)
3523{
3524 struct hci_conn_params *params;
3525
Johan Hedberg738f6182014-07-03 19:33:51 +03003526 /* The conn params list only contains identity addresses */
3527 if (!hci_is_identity_address(addr, addr_type))
3528 return NULL;
3529
Andre Guedes15819a72014-02-03 13:56:18 -03003530 list_for_each_entry(params, &hdev->le_conn_params, list) {
3531 if (bacmp(&params->addr, addr) == 0 &&
3532 params->addr_type == addr_type) {
3533 return params;
3534 }
3535 }
3536
3537 return NULL;
3538}
3539
Andre Guedescef952c2014-02-26 20:21:49 -03003540static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3541{
3542 struct hci_conn *conn;
3543
3544 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3545 if (!conn)
3546 return false;
3547
3548 if (conn->dst_type != type)
3549 return false;
3550
3551 if (conn->state != BT_CONNECTED)
3552 return false;
3553
3554 return true;
3555}
3556
Andre Guedes15819a72014-02-03 13:56:18 -03003557/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003558struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3559 bdaddr_t *addr, u8 addr_type)
Marcel Holtmann4b109662014-06-29 13:41:49 +02003560{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003561 struct hci_conn_params *param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003562
Johan Hedberg738f6182014-07-03 19:33:51 +03003563 /* The list only contains identity addresses */
3564 if (!hci_is_identity_address(addr, addr_type))
3565 return NULL;
3566
Johan Hedberg501f8822014-07-04 12:37:26 +03003567 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003568 if (bacmp(&param->addr, addr) == 0 &&
3569 param->addr_type == addr_type)
3570 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003571 }
3572
3573 return NULL;
3574}
3575
3576/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003577struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3578 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003579{
3580 struct hci_conn_params *params;
3581
Johan Hedbergc46245b2014-07-02 17:37:33 +03003582 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003583 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003584
3585 params = hci_conn_params_lookup(hdev, addr, addr_type);
3586 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003587 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003588
3589 params = kzalloc(sizeof(*params), GFP_KERNEL);
3590 if (!params) {
3591 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003592 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003593 }
3594
3595 bacpy(&params->addr, addr);
3596 params->addr_type = addr_type;
3597
3598 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003599 INIT_LIST_HEAD(&params->action);
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003600
3601 params->conn_min_interval = hdev->le_conn_min_interval;
3602 params->conn_max_interval = hdev->le_conn_max_interval;
3603 params->conn_latency = hdev->le_conn_latency;
3604 params->supervision_timeout = hdev->le_supv_timeout;
3605 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3606
3607 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3608
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003609 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003610}
3611
3612/* This function requires the caller holds hdev->lock */
3613int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003614 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003615{
3616 struct hci_conn_params *params;
3617
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003618 params = hci_conn_params_add(hdev, addr, addr_type);
3619 if (!params)
3620 return -EIO;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003621
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003622 if (params->auto_connect == auto_connect)
3623 return 0;
3624
Johan Hedberg95305ba2014-07-04 12:37:21 +03003625 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003626
Andre Guedescef952c2014-02-26 20:21:49 -03003627 switch (auto_connect) {
3628 case HCI_AUTO_CONN_DISABLED:
3629 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003630 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003631 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003632 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003633 list_add(&params->action, &hdev->pend_le_reports);
3634 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003635 break;
Andre Guedescef952c2014-02-26 20:21:49 -03003636 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003637 if (!is_connected(hdev, addr, addr_type)) {
3638 list_add(&params->action, &hdev->pend_le_conns);
3639 hci_update_background_scan(hdev);
3640 }
Andre Guedescef952c2014-02-26 20:21:49 -03003641 break;
3642 }
Andre Guedes15819a72014-02-03 13:56:18 -03003643
Johan Hedberg851efca2014-07-02 22:42:00 +03003644 params->auto_connect = auto_connect;
3645
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003646 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3647 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003648
3649 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003650}
3651
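/* The auto_connect values map onto the action lists as follows:
 * HCI_AUTO_CONN_DISABLED and HCI_AUTO_CONN_LINK_LOSS leave the entry
 * off both lists, HCI_AUTO_CONN_REPORT puts it on pend_le_reports so
 * that found advertisements get reported, and HCI_AUTO_CONN_ALWAYS
 * puts it on pend_le_conns (unless already connected) so that the
 * background scan will initiate a connection.
 */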
3652/* This function requires the caller holds hdev->lock */
3653void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3654{
3655 struct hci_conn_params *params;
3656
3657 params = hci_conn_params_lookup(hdev, addr, addr_type);
3658 if (!params)
3659 return;
3660
Johan Hedberg95305ba2014-07-04 12:37:21 +03003661 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003662 list_del(&params->list);
3663 kfree(params);
3664
Johan Hedberg95305ba2014-07-04 12:37:21 +03003665 hci_update_background_scan(hdev);
3666
Andre Guedes15819a72014-02-03 13:56:18 -03003667 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3668}
3669
3670/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003671void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3672{
3673 struct hci_conn_params *params, *tmp;
3674
3675 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3676 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3677 continue;
3678 list_del(&params->list);
3679 kfree(params);
3680 }
3681
3682	BT_DBG("All disabled LE connection parameters were removed");
3683}
3684
3685/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003686void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003687{
3688 struct hci_conn_params *params, *tmp;
3689
3690 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberga2f41a82014-07-04 12:37:19 +03003691 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003692 list_del(&params->list);
3693 kfree(params);
3694 }
3695
Johan Hedberga2f41a82014-07-04 12:37:19 +03003696 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003697
Andre Guedes15819a72014-02-03 13:56:18 -03003698 BT_DBG("All LE connection parameters were removed");
3699}
3700
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003701static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003702{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003703 if (status) {
3704 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003705
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003706 hci_dev_lock(hdev);
3707 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3708 hci_dev_unlock(hdev);
3709 return;
3710 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003711}
3712
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003713static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003714{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003715 /* General inquiry access code (GIAC) */
3716 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3717 struct hci_request req;
3718 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003719 int err;
3720
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003721 if (status) {
3722 BT_ERR("Failed to disable LE scanning: status %d", status);
3723 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003724 }
3725
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003726 switch (hdev->discovery.type) {
3727 case DISCOV_TYPE_LE:
3728 hci_dev_lock(hdev);
3729 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3730 hci_dev_unlock(hdev);
3731 break;
3732
3733 case DISCOV_TYPE_INTERLEAVED:
3734 hci_req_init(&req, hdev);
3735
3736 memset(&cp, 0, sizeof(cp));
3737 memcpy(&cp.lap, lap, sizeof(cp.lap));
3738 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3739 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3740
3741 hci_dev_lock(hdev);
3742
3743 hci_inquiry_cache_flush(hdev);
3744
3745 err = hci_req_run(&req, inquiry_complete);
3746 if (err) {
3747 BT_ERR("Inquiry request failed: err %d", err);
3748 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3749 }
3750
3751 hci_dev_unlock(hdev);
3752 break;
3753 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003754}
3755
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003756static void le_scan_disable_work(struct work_struct *work)
3757{
3758 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003759 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003760 struct hci_request req;
3761 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003762
3763 BT_DBG("%s", hdev->name);
3764
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003765 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003766
Andre Guedesb1efcc22014-02-26 20:21:40 -03003767 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003768
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003769 err = hci_req_run(&req, le_scan_disable_work_complete);
3770 if (err)
3771 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003772}
3773
Johan Hedberg8d972502014-02-28 12:54:14 +02003774static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3775{
3776 struct hci_dev *hdev = req->hdev;
3777
3778 /* If we're advertising or initiating an LE connection we can't
3779 * go ahead and change the random address at this time. This is
3780 * because the eventual initiator address used for the
3781 * subsequently created connection will be undefined (some
3782 * controllers use the new address and others the one we had
3783 * when the operation started).
3784 *
3785 * In this kind of scenario skip the update and let the random
3786 * address be updated at the next cycle.
3787 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003788 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003789 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3790 BT_DBG("Deferring random address update");
3791 return;
3792 }
3793
3794 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3795}
3796
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003797int hci_update_random_address(struct hci_request *req, bool require_privacy,
3798 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003799{
3800 struct hci_dev *hdev = req->hdev;
3801 int err;
3802
3803	/* If privacy is enabled, use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003804	 * the current RPA has expired or something other than the
3805	 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003806 */
3807 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003808 int to;
3809
3810 *own_addr_type = ADDR_LE_DEV_RANDOM;
3811
3812 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003813 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003814 return 0;
3815
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003816 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003817 if (err < 0) {
3818 BT_ERR("%s failed to generate new RPA", hdev->name);
3819 return err;
3820 }
3821
Johan Hedberg8d972502014-02-28 12:54:14 +02003822 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003823
3824 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3825 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3826
3827 return 0;
3828 }
3829
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003830	/* In case of required privacy without a resolvable private address,
3831 * use an unresolvable private address. This is useful for active
3832 * scanning and non-connectable advertising.
3833 */
3834 if (require_privacy) {
3835 bdaddr_t urpa;
3836
3837 get_random_bytes(&urpa, 6);
3838 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3839
3840 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003841 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003842 return 0;
3843 }
3844
Johan Hedbergebd3a742014-02-23 19:42:21 +02003845	/* If forcing static address is in use or there is no public
3846	 * address, use the static address as the random address (but
3847	 * skip the HCI command if the current random address is
3848	 * already the static one).
3849 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003850 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003851 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3852 *own_addr_type = ADDR_LE_DEV_RANDOM;
3853 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3854 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3855 &hdev->static_addr);
3856 return 0;
3857 }
3858
3859 /* Neither privacy nor static address is being used so use a
3860 * public address.
3861 */
3862 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3863
3864 return 0;
3865}
3866
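/* In short, own address selection above works out as: privacy enabled
 * -> resolvable private address (regenerated once expired); privacy
 * required for this request but not enabled -> unresolvable private
 * address; forced static address or no public address -> static
 * random address; otherwise -> the public address.
 */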
Johan Hedberga1f4c312014-02-27 14:05:41 +02003867/* Copy the Identity Address of the controller.
3868 *
3869 * If the controller has a public BD_ADDR, then by default use that one.
3870 * If this is a LE only controller without a public address, default to
3871 * the static random address.
3872 *
3873 * For debugging purposes it is possible to force controllers with a
3874 * public address to use the static random address instead.
3875 */
3876void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3877 u8 *bdaddr_type)
3878{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003879 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003880 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3881 bacpy(bdaddr, &hdev->static_addr);
3882 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3883 } else {
3884 bacpy(bdaddr, &hdev->bdaddr);
3885 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3886 }
3887}
3888
David Herrmann9be0dab2012-04-22 14:39:57 +02003889/* Alloc HCI device */
3890struct hci_dev *hci_alloc_dev(void)
3891{
3892 struct hci_dev *hdev;
3893
3894 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3895 if (!hdev)
3896 return NULL;
3897
David Herrmannb1b813d2012-04-22 14:39:58 +02003898 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3899 hdev->esco_type = (ESCO_HV1);
3900 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003901 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3902 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003903 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003904 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3905 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003906
David Herrmannb1b813d2012-04-22 14:39:58 +02003907 hdev->sniff_max_interval = 800;
3908 hdev->sniff_min_interval = 80;
3909
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003910 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003911 hdev->le_scan_interval = 0x0060;
3912 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003913 hdev->le_conn_min_interval = 0x0028;
3914 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003915 hdev->le_conn_latency = 0x0000;
3916 hdev->le_supv_timeout = 0x002a;
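	/* For reference, in Core spec units the LE defaults above amount
	 * to a 60 ms scan interval and 30 ms scan window (0.625 ms
	 * units), a 50-70 ms connection interval (1.25 ms units) and a
	 * 420 ms supervision timeout (10 ms units).
	 */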
Marcel Holtmannbef64732013-10-11 08:23:19 -07003917
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003918 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003919 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003920 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3921 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003922
David Herrmannb1b813d2012-04-22 14:39:58 +02003923 mutex_init(&hdev->lock);
3924 mutex_init(&hdev->req_lock);
3925
3926 INIT_LIST_HEAD(&hdev->mgmt_pending);
3927 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003928 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003929 INIT_LIST_HEAD(&hdev->uuids);
3930 INIT_LIST_HEAD(&hdev->link_keys);
3931 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003932 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003933 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003934 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003935 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003936 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003937 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003938 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003939
3940 INIT_WORK(&hdev->rx_work, hci_rx_work);
3941 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3942 INIT_WORK(&hdev->tx_work, hci_tx_work);
3943 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003944
David Herrmannb1b813d2012-04-22 14:39:58 +02003945 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3946 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3947 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3948
David Herrmannb1b813d2012-04-22 14:39:58 +02003949 skb_queue_head_init(&hdev->rx_q);
3950 skb_queue_head_init(&hdev->cmd_q);
3951 skb_queue_head_init(&hdev->raw_q);
3952
3953 init_waitqueue_head(&hdev->req_wait_q);
3954
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003955 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003956
David Herrmannb1b813d2012-04-22 14:39:58 +02003957 hci_init_sysfs(hdev);
3958 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003959
3960 return hdev;
3961}
3962EXPORT_SYMBOL(hci_alloc_dev);
3963
3964/* Free HCI device */
3965void hci_free_dev(struct hci_dev *hdev)
3966{
David Herrmann9be0dab2012-04-22 14:39:57 +02003967 /* will free via device release */
3968 put_device(&hdev->dev);
3969}
3970EXPORT_SYMBOL(hci_free_dev);
3971
Linus Torvalds1da177e2005-04-16 15:20:36 -07003972/* Register HCI device */
3973int hci_register_dev(struct hci_dev *hdev)
3974{
David Herrmannb1b813d2012-04-22 14:39:58 +02003975 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976
Marcel Holtmann74292d52014-07-06 15:50:27 +02003977 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 return -EINVAL;
3979
Mat Martineau08add512011-11-02 16:18:36 -07003980 /* Do not allow HCI_AMP devices to register at index 0,
3981 * so the index can be used as the AMP controller ID.
3982 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003983 switch (hdev->dev_type) {
3984 case HCI_BREDR:
3985 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3986 break;
3987 case HCI_AMP:
3988 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3989 break;
3990 default:
3991 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003993
Sasha Levin3df92b32012-05-27 22:36:56 +02003994 if (id < 0)
3995 return id;
3996
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997 sprintf(hdev->name, "hci%d", id);
3998 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003999
4000 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4001
Kees Cookd8537542013-07-03 15:04:57 -07004002 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4003 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004004 if (!hdev->workqueue) {
4005 error = -ENOMEM;
4006 goto err;
4007 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004008
Kees Cookd8537542013-07-03 15:04:57 -07004009 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4010 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004011 if (!hdev->req_workqueue) {
4012 destroy_workqueue(hdev->workqueue);
4013 error = -ENOMEM;
4014 goto err;
4015 }
4016
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004017 if (!IS_ERR_OR_NULL(bt_debugfs))
4018 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4019
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004020 dev_set_name(&hdev->dev, "%s", hdev->name);
4021
Johan Hedberg99780a72014-02-18 10:40:07 +02004022 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4023 CRYPTO_ALG_ASYNC);
4024 if (IS_ERR(hdev->tfm_aes)) {
4025 BT_ERR("Unable to create crypto context");
4026 error = PTR_ERR(hdev->tfm_aes);
4027 hdev->tfm_aes = NULL;
4028 goto err_wqueue;
4029 }
4030
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004031 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004032 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02004033 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004034
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004035 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004036 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4037 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004038 if (hdev->rfkill) {
4039 if (rfkill_register(hdev->rfkill) < 0) {
4040 rfkill_destroy(hdev->rfkill);
4041 hdev->rfkill = NULL;
4042 }
4043 }
4044
Johan Hedberg5e130362013-09-13 08:58:17 +03004045 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4046 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4047
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004048 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004049 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004050
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004051 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004052 /* Assume BR/EDR support until proven otherwise (such as
4053 * through reading supported features during init.
4054 */
4055 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4056 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004057
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004058 write_lock(&hci_dev_list_lock);
4059 list_add(&hdev->list, &hci_dev_list);
4060 write_unlock(&hci_dev_list_lock);
4061
Marcel Holtmann4a964402014-07-02 19:10:33 +02004062 /* Devices that are marked for raw-only usage are unconfigured
4063 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004064 */
4065 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004066 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004067
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004069 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070
Johan Hedberg19202572013-01-14 22:33:51 +02004071 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004072
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004074
Johan Hedberg99780a72014-02-18 10:40:07 +02004075err_tfm:
4076 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02004077err_wqueue:
4078 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004079 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004080err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004081 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004082
David Herrmann33ca9542011-10-08 14:58:49 +02004083 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084}
4085EXPORT_SYMBOL(hci_register_dev);
4086
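/* A minimal sketch of a transport driver using this API (hypothetical
 * driver, not part of this file): allocate the device, provide the
 * mandatory open/close/send callbacks that hci_register_dev() checks
 * for, and register it. Note that registration queues the power_on
 * work automatically.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the frame to its transport here */
	kfree_skb(skb);
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}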
4087/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004088void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089{
Sasha Levin3df92b32012-05-27 22:36:56 +02004090 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004091
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004092 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093
Johan Hovold94324962012-03-15 14:48:41 +01004094 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4095
Sasha Levin3df92b32012-05-27 22:36:56 +02004096 id = hdev->id;
4097
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004098 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004100 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101
4102 hci_dev_do_close(hdev);
4103
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304104 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004105 kfree_skb(hdev->reassembly[i]);
4106
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004107 cancel_work_sync(&hdev->power_on);
4108
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004109 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02004110 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4111 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004112 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004113 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004114 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004115 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004116
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004117 /* mgmt_index_removed should take care of emptying the
4118 * pending list */
4119 BUG_ON(!list_empty(&hdev->mgmt_pending));
4120
Linus Torvalds1da177e2005-04-16 15:20:36 -07004121 hci_notify(hdev, HCI_DEV_UNREG);
4122
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004123 if (hdev->rfkill) {
4124 rfkill_unregister(hdev->rfkill);
4125 rfkill_destroy(hdev->rfkill);
4126 }
4127
Johan Hedberg99780a72014-02-18 10:40:07 +02004128 if (hdev->tfm_aes)
4129 crypto_free_blkcipher(hdev->tfm_aes);
4130
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004131 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004132
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004133 debugfs_remove_recursive(hdev->debugfs);
4134
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004135 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004136 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004137
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004138 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004139 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004140 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004141 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004142 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004143 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004144 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004145 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004146 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004147 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004148 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004149
David Herrmanndc946bd2012-01-07 15:47:24 +01004150 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004151
4152 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153}
4154EXPORT_SYMBOL(hci_unregister_dev);
4155
4156/* Suspend HCI device */
4157int hci_suspend_dev(struct hci_dev *hdev)
4158{
4159 hci_notify(hdev, HCI_DEV_SUSPEND);
4160 return 0;
4161}
4162EXPORT_SYMBOL(hci_suspend_dev);
4163
4164/* Resume HCI device */
4165int hci_resume_dev(struct hci_dev *hdev)
4166{
4167 hci_notify(hdev, HCI_DEV_RESUME);
4168 return 0;
4169}
4170EXPORT_SYMBOL(hci_resume_dev);
4171
Marcel Holtmann76bca882009-11-18 00:40:39 +01004172/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004173int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004174{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004175 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004176 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004177 kfree_skb(skb);
4178 return -ENXIO;
4179 }
4180
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004181 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004182 bt_cb(skb)->incoming = 1;
4183
4184 /* Time stamp */
4185 __net_timestamp(skb);
4186
Marcel Holtmann76bca882009-11-18 00:40:39 +01004187 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004188 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004189
Marcel Holtmann76bca882009-11-18 00:40:39 +01004190 return 0;
4191}
4192EXPORT_SYMBOL(hci_recv_frame);
4193
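/* Reassemble one HCI packet of the given type from a byte stream.
 *
 * The partially built packet lives in hdev->reassembly[index]; a fresh
 * skb is allocated with room for the largest packet of that type, and
 * scb->expect tracks how many bytes are still outstanding. Once the
 * header is complete, expect is reloaded from the header's own length
 * field (plen/dlen), and when it reaches zero the finished frame is
 * handed to hci_recv_frame().
 *
 * Returns the number of input bytes left over (they belong to the
 * next packet) or a negative error code.
 */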
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304194static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004195 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304196{
4197 int len = 0;
4198 int hlen = 0;
4199 int remain = count;
4200 struct sk_buff *skb;
4201 struct bt_skb_cb *scb;
4202
4203 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004204 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304205 return -EILSEQ;
4206
4207 skb = hdev->reassembly[index];
4208
4209 if (!skb) {
4210 switch (type) {
4211 case HCI_ACLDATA_PKT:
4212 len = HCI_MAX_FRAME_SIZE;
4213 hlen = HCI_ACL_HDR_SIZE;
4214 break;
4215 case HCI_EVENT_PKT:
4216 len = HCI_MAX_EVENT_SIZE;
4217 hlen = HCI_EVENT_HDR_SIZE;
4218 break;
4219 case HCI_SCODATA_PKT:
4220 len = HCI_MAX_SCO_SIZE;
4221 hlen = HCI_SCO_HDR_SIZE;
4222 break;
4223 }
4224
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004225 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304226 if (!skb)
4227 return -ENOMEM;
4228
4229 scb = (void *) skb->cb;
4230 scb->expect = hlen;
4231 scb->pkt_type = type;
4232
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304233 hdev->reassembly[index] = skb;
4234 }
4235
4236 while (count) {
4237 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004238 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304239
4240 memcpy(skb_put(skb, len), data, len);
4241
4242 count -= len;
4243 data += len;
4244 scb->expect -= len;
4245 remain = count;
4246
4247 switch (type) {
4248 case HCI_EVENT_PKT:
4249 if (skb->len == HCI_EVENT_HDR_SIZE) {
4250 struct hci_event_hdr *h = hci_event_hdr(skb);
4251 scb->expect = h->plen;
4252
4253 if (skb_tailroom(skb) < scb->expect) {
4254 kfree_skb(skb);
4255 hdev->reassembly[index] = NULL;
4256 return -ENOMEM;
4257 }
4258 }
4259 break;
4260
4261 case HCI_ACLDATA_PKT:
4262 if (skb->len == HCI_ACL_HDR_SIZE) {
4263 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4264 scb->expect = __le16_to_cpu(h->dlen);
4265
4266 if (skb_tailroom(skb) < scb->expect) {
4267 kfree_skb(skb);
4268 hdev->reassembly[index] = NULL;
4269 return -ENOMEM;
4270 }
4271 }
4272 break;
4273
4274 case HCI_SCODATA_PKT:
4275 if (skb->len == HCI_SCO_HDR_SIZE) {
4276 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4277 scb->expect = h->dlen;
4278
4279 if (skb_tailroom(skb) < scb->expect) {
4280 kfree_skb(skb);
4281 hdev->reassembly[index] = NULL;
4282 return -ENOMEM;
4283 }
4284 }
4285 break;
4286 }
4287
4288 if (scb->expect == 0) {
4289 /* Complete frame */
4290
4291 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004292 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304293
4294 hdev->reassembly[index] = NULL;
4295 return remain;
4296 }
4297 }
4298
4299 return remain;
4300}
4301
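/* hci_recv_fragment() is the entry point for drivers whose transport
 * delivers a packet of known type in arbitrarily sized chunks. A
 * minimal usage sketch (my_dev and my_rx_event are hypothetical names,
 * not part of this file):
 *
 *	static void my_rx_event(struct my_dev *dev, u8 *buf, int len)
 *	{
 *		int err = hci_recv_fragment(dev->hdev, HCI_EVENT_PKT,
 *					    buf, len);
 *		if (err < 0)
 *			BT_ERR("event reassembly failed (%d)", err);
 *	}
 *
 * The reassembly slot is derived from the packet type (type - 1), so
 * interleaved fragments of different packet types cannot corrupt each
 * other.
 */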
Marcel Holtmannef222012007-07-11 06:42:04 +02004302int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4303{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304304 int rem = 0;
4305
Marcel Holtmannef222012007-07-11 06:42:04 +02004306 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4307 return -EILSEQ;
4308
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004309 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004310 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304311 if (rem < 0)
4312 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004313
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304314 data += (count - rem);
4315 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004316 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004317
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304318 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004319}
4320EXPORT_SYMBOL(hci_recv_fragment);
4321
Suraj Sumangala99811512010-07-14 13:02:19 +05304322#define STREAM_REASSEMBLY 0
4323
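/* Stream reassembly serves drivers whose transport is one raw byte
 * stream (an H:4-style UART, for instance) where the first byte of
 * every frame carries the packet type. It uses slot 0 of
 * hdev->reassembly, which the typed path above (index = type - 1)
 * never touches.
 */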
4324int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4325{
4326 int type;
4327 int rem = 0;
4328
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004329 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304330 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4331
4332 if (!skb) {
4333 struct { char type; } *pkt;
4334
4335 /* Start of the frame */
4336 pkt = data;
4337 type = pkt->type;
4338
4339 data++;
4340 count--;
4341 } else
4342 type = bt_cb(skb)->pkt_type;
4343
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004344 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004345 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304346 if (rem < 0)
4347 return rem;
4348
4349 data += (count - rem);
4350 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004351 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304352
4353 return rem;
4354}
4355EXPORT_SYMBOL(hci_recv_stream_fragment);
4356
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357/* ---- Interface to upper protocols ---- */
4358
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359int hci_register_cb(struct hci_cb *cb)
4360{
4361 BT_DBG("%p name %s", cb, cb->name);
4362
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004363 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004365 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366
4367 return 0;
4368}
4369EXPORT_SYMBOL(hci_register_cb);
4370
4371int hci_unregister_cb(struct hci_cb *cb)
4372{
4373 BT_DBG("%p name %s", cb, cb->name);
4374
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004375 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004376 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004377 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378
4379 return 0;
4380}
4381EXPORT_SYMBOL(hci_unregister_cb);
4382
Marcel Holtmann51086992013-10-10 14:54:19 -07004383static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004385 int err;
4386
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004387 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004389 /* Time stamp */
4390 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004392 /* Send copy to monitor */
4393 hci_send_to_monitor(hdev, skb);
4394
4395 if (atomic_read(&hdev->promisc)) {
4396 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004397 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004398 }
4399
4400 /* Get rid of skb owner, prior to sending to the driver. */
4401 skb_orphan(skb);
4402
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004403 err = hdev->send(hdev, skb);
4404 if (err < 0) {
4405 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4406 kfree_skb(skb);
4407 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408}
4409
Johan Hedberg3119ae92013-03-05 20:37:44 +02004410void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4411{
4412 skb_queue_head_init(&req->cmd_q);
4413 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004414 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004415}
4416
4417int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4418{
4419 struct hci_dev *hdev = req->hdev;
4420 struct sk_buff *skb;
4421 unsigned long flags;
4422
4423 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4424
Andre Guedes5d73e032013-03-08 11:20:16 -03004425	/* If an error occurred during request building, remove all HCI
4426 * commands queued on the HCI request queue.
4427 */
4428 if (req->err) {
4429 skb_queue_purge(&req->cmd_q);
4430 return req->err;
4431 }
4432
Johan Hedberg3119ae92013-03-05 20:37:44 +02004433 /* Do not allow empty requests */
4434 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004435 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004436
4437 skb = skb_peek_tail(&req->cmd_q);
4438 bt_cb(skb)->req.complete = complete;
4439
4440 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4441 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4442 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4443
4444 queue_work(hdev->workqueue, &hdev->cmd_work);
4445
4446 return 0;
4447}
4448
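/* A minimal sketch of driving the request API above (the opcode,
 * parameter and callback are illustrative assumptions, not taken from
 * this file):
 *
 *	static void scan_enable_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, scan_enable_complete);
 *
 * All queued commands are spliced onto hdev->cmd_q atomically, and
 * scan_enable_complete() runs once, after the last of them completes
 * or as soon as one of them fails.
 */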
Marcel Holtmann899de762014-07-11 05:51:58 +02004449bool hci_req_pending(struct hci_dev *hdev)
4450{
4451 return (hdev->req_status == HCI_REQ_PEND);
4452}
4453
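/* On the wire an HCI command starts with a 3 byte header: a 16-bit
 * opcode in little-endian order (OGF in the upper 6 bits, OCF in the
 * lower 10), followed by a one byte parameter length and then plen
 * bytes of parameters. hci_prepare_cmd() builds exactly that layout
 * into a freshly allocated skb.
 */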
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004454static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004455 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456{
4457 int len = HCI_COMMAND_HDR_SIZE + plen;
4458 struct hci_command_hdr *hdr;
4459 struct sk_buff *skb;
4460
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004462 if (!skb)
4463 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004464
4465 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004466 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467 hdr->plen = plen;
4468
4469 if (plen)
4470 memcpy(skb_put(skb, plen), param, plen);
4471
4472 BT_DBG("skb len %d", skb->len);
4473
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004474 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004475
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004476 return skb;
4477}
4478
4479/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004480int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4481 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004482{
4483 struct sk_buff *skb;
4484
4485 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4486
4487 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4488 if (!skb) {
4489 BT_ERR("%s no memory for command", hdev->name);
4490 return -ENOMEM;
4491 }
4492
Johan Hedberg11714b32013-03-05 20:37:47 +02004493	/* Stand-alone HCI commands must be flagged as
4494 * single-command requests.
4495 */
4496 bt_cb(skb)->req.start = true;
4497
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004499 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500
4501 return 0;
4502}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004503
Johan Hedberg71c76a12013-03-05 20:37:46 +02004504/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004505void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4506 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004507{
4508 struct hci_dev *hdev = req->hdev;
4509 struct sk_buff *skb;
4510
4511 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4512
Andre Guedes34739c12013-03-08 11:20:18 -03004513	/* If an error occurred during request building, there is no point in
4514 * queueing the HCI command. We can simply return.
4515 */
4516 if (req->err)
4517 return;
4518
Johan Hedberg71c76a12013-03-05 20:37:46 +02004519 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4520 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004521 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4522 hdev->name, opcode);
4523 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004524 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004525 }
4526
4527 if (skb_queue_empty(&req->cmd_q))
4528 bt_cb(skb)->req.start = true;
4529
Johan Hedberg02350a72013-04-03 21:50:29 +03004530 bt_cb(skb)->req.event = event;
4531
Johan Hedberg71c76a12013-03-05 20:37:46 +02004532 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004533}
4534
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004535void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4536 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004537{
4538 hci_req_add_ev(req, opcode, plen, param, 0);
4539}
4540
Linus Torvalds1da177e2005-04-16 15:20:36 -07004541/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004542void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004543{
4544 struct hci_command_hdr *hdr;
4545
4546 if (!hdev->sent_cmd)
4547 return NULL;
4548
4549 hdr = (void *) hdev->sent_cmd->data;
4550
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004551 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552 return NULL;
4553
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004554 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555
4556 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4557}
4558
4559/* Send ACL data */
4560static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4561{
4562 struct hci_acl_hdr *hdr;
4563 int len = skb->len;
4564
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004565 skb_push(skb, HCI_ACL_HDR_SIZE);
4566 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004567 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004568 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4569 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570}
4571
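/* The ACL handle field packs the 12-bit connection handle together
 * with the packet boundary and broadcast flags in its top four bits
 * (see hci_handle_pack()). For skbs carrying a frag_list,
 * hci_queue_acl() below re-tags every fragment after the first with
 * ACL_CONT so the controller can reassemble the original PDU.
 */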
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004572static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004573 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004574{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004575 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576 struct hci_dev *hdev = conn->hdev;
4577 struct sk_buff *list;
4578
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004579 skb->len = skb_headlen(skb);
4580 skb->data_len = 0;
4581
4582 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004583
4584 switch (hdev->dev_type) {
4585 case HCI_BREDR:
4586 hci_add_acl_hdr(skb, conn->handle, flags);
4587 break;
4588 case HCI_AMP:
4589 hci_add_acl_hdr(skb, chan->handle, flags);
4590 break;
4591 default:
4592 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4593 return;
4594 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004595
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004596 list = skb_shinfo(skb)->frag_list;
4597 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004598		/* Non-fragmented */
4599 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4600
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004601 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004602 } else {
4603 /* Fragmented */
4604 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4605
4606 skb_shinfo(skb)->frag_list = NULL;
4607
4608 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004609 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004611 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004612
4613 flags &= ~ACL_START;
4614 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615 do {
4616 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004617
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004618 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004619 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620
4621 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4622
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004623 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004624 } while (list);
4625
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004626 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004628}
4629
4630void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4631{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004632 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004633
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004634 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004635
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004636 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004638 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004639}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004640
4641/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004642void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643{
4644 struct hci_dev *hdev = conn->hdev;
4645 struct hci_sco_hdr hdr;
4646
4647 BT_DBG("%s len %d", hdev->name, skb->len);
4648
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004649 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004650 hdr.dlen = skb->len;
4651
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004652 skb_push(skb, HCI_SCO_HDR_SIZE);
4653 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004654 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004656 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004657
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004659 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661
4662/* ---- HCI TX task (outgoing data) ---- */
4663
4664/* HCI Connection scheduler */
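/* hci_low_sent() picks, among connections of the requested type that
 * have queued data, the one with the fewest packets in flight, and
 * grants it an equal share of the free controller buffers. For
 * example, with hdev->acl_cnt == 8 free ACL buffers and three busy
 * ACL connections, each call hands out a quote of 8 / 3 = 2; a quote
 * of at least 1 is always granted so the link can make progress.
 */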
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004665static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4666 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667{
4668 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004669 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004670 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004672	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004673 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004674
4675 rcu_read_lock();
4676
4677 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004678 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004680
4681 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4682 continue;
4683
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684 num++;
4685
4686 if (c->sent < min) {
4687 min = c->sent;
4688 conn = c;
4689 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004690
4691 if (hci_conn_num(hdev, type) == num)
4692 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004693 }
4694
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004695 rcu_read_unlock();
4696
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004698 int cnt, q;
4699
4700 switch (conn->type) {
4701 case ACL_LINK:
4702 cnt = hdev->acl_cnt;
4703 break;
4704 case SCO_LINK:
4705 case ESCO_LINK:
4706 cnt = hdev->sco_cnt;
4707 break;
4708 case LE_LINK:
4709 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4710 break;
4711 default:
4712 cnt = 0;
4713 BT_ERR("Unknown link type");
4714 }
4715
4716 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717 *quote = q ? q : 1;
4718 } else
4719 *quote = 0;
4720
4721 BT_DBG("conn %p quote %d", conn, *quote);
4722 return conn;
4723}
4724
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004725static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726{
4727 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004728 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729
Ville Tervobae1f5d92011-02-10 22:38:53 -03004730 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004731
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004732 rcu_read_lock();
4733
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004735 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004736 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004737 BT_ERR("%s killing stalled connection %pMR",
4738 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004739 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740 }
4741 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004742
4743 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744}
4745
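/* Channel-level variant of the scheduler: first determine the highest
 * priority among the head skbs of all queued channels of this link
 * type, then, considering only channels at that priority, pick the one
 * whose connection has the fewest packets in flight. The quote is an
 * equal share of the free buffer count, as in hci_low_sent().
 */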
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004746static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4747 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004748{
4749 struct hci_conn_hash *h = &hdev->conn_hash;
4750 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004751 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004752 struct hci_conn *conn;
4753 int cnt, q, conn_num = 0;
4754
4755 BT_DBG("%s", hdev->name);
4756
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004757 rcu_read_lock();
4758
4759 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004760 struct hci_chan *tmp;
4761
4762 if (conn->type != type)
4763 continue;
4764
4765 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4766 continue;
4767
4768 conn_num++;
4769
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004770 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004771 struct sk_buff *skb;
4772
4773 if (skb_queue_empty(&tmp->data_q))
4774 continue;
4775
4776 skb = skb_peek(&tmp->data_q);
4777 if (skb->priority < cur_prio)
4778 continue;
4779
4780 if (skb->priority > cur_prio) {
4781 num = 0;
4782 min = ~0;
4783 cur_prio = skb->priority;
4784 }
4785
4786 num++;
4787
4788 if (conn->sent < min) {
4789 min = conn->sent;
4790 chan = tmp;
4791 }
4792 }
4793
4794 if (hci_conn_num(hdev, type) == conn_num)
4795 break;
4796 }
4797
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004798 rcu_read_unlock();
4799
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004800 if (!chan)
4801 return NULL;
4802
4803 switch (chan->conn->type) {
4804 case ACL_LINK:
4805 cnt = hdev->acl_cnt;
4806 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004807 case AMP_LINK:
4808 cnt = hdev->block_cnt;
4809 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004810 case SCO_LINK:
4811 case ESCO_LINK:
4812 cnt = hdev->sco_cnt;
4813 break;
4814 case LE_LINK:
4815 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4816 break;
4817 default:
4818 cnt = 0;
4819 BT_ERR("Unknown link type");
4820 }
4821
4822 q = cnt / num;
4823 *quote = q ? q : 1;
4824 BT_DBG("chan %p quote %d", chan, *quote);
4825 return chan;
4826}
4827
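/* Anti-starvation pass, run after a scheduling round that consumed
 * buffers: any channel that sent nothing gets the head skb of its
 * queue promoted to HCI_PRIO_MAX - 1, while channels that did send
 * merely have their per-round counter reset and keep their original
 * priority.
 */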
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004828static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4829{
4830 struct hci_conn_hash *h = &hdev->conn_hash;
4831 struct hci_conn *conn;
4832 int num = 0;
4833
4834 BT_DBG("%s", hdev->name);
4835
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004836 rcu_read_lock();
4837
4838 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004839 struct hci_chan *chan;
4840
4841 if (conn->type != type)
4842 continue;
4843
4844 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4845 continue;
4846
4847 num++;
4848
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004849 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004850 struct sk_buff *skb;
4851
4852 if (chan->sent) {
4853 chan->sent = 0;
4854 continue;
4855 }
4856
4857 if (skb_queue_empty(&chan->data_q))
4858 continue;
4859
4860 skb = skb_peek(&chan->data_q);
4861 if (skb->priority >= HCI_PRIO_MAX - 1)
4862 continue;
4863
4864 skb->priority = HCI_PRIO_MAX - 1;
4865
4866 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004867 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004868 }
4869
4870 if (hci_conn_num(hdev, type) == num)
4871 break;
4872 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004873
4874 rcu_read_unlock();
4875
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004876}
4877
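/* Block-based flow control accounts buffer usage in fixed-size blocks
 * instead of whole packets; the 4 byte ACL header is not counted. For
 * example, with hdev->block_len == 256 a 516 byte skb (header plus 512
 * bytes of payload) occupies DIV_ROUND_UP(512, 256) == 2 blocks.
 */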
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004878static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4879{
4880 /* Calculate count of blocks used by this packet */
4881 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4882}
4883
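/* HCI_ACL_TX_TIMEOUT (45 seconds) is deliberately longer than the
 * largest link supervision timeout the specification allows (0xffff
 * slots * 0.625 ms, roughly 40.9 seconds), so a stalled link is only
 * killed once the baseband has had every chance to recover on its own.
 */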
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004884static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004885{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004886 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887 /* ACL tx timeout must be longer than maximum
4888 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004889 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004890 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004891 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004892 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004893}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004894
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004895static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004896{
4897 unsigned int cnt = hdev->acl_cnt;
4898 struct hci_chan *chan;
4899 struct sk_buff *skb;
4900 int quote;
4901
4902 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004903
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004904 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004905 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004906 u32 priority = (skb_peek(&chan->data_q))->priority;
4907 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004908 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004909 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004910
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004911 /* Stop if priority has changed */
4912 if (skb->priority < priority)
4913 break;
4914
4915 skb = skb_dequeue(&chan->data_q);
4916
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004917 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004918 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004919
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004920 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921 hdev->acl_last_tx = jiffies;
4922
4923 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004924 chan->sent++;
4925 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004926 }
4927 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004928
4929 if (cnt != hdev->acl_cnt)
4930 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004931}
4932
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004933static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004934{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004935 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004936 struct hci_chan *chan;
4937 struct sk_buff *skb;
4938 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004939 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004940
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004941 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004942
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004943 BT_DBG("%s", hdev->name);
4944
4945 if (hdev->dev_type == HCI_AMP)
4946 type = AMP_LINK;
4947 else
4948 type = ACL_LINK;
4949
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004950 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004951 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004952 u32 priority = (skb_peek(&chan->data_q))->priority;
4953 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4954 int blocks;
4955
4956 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004957 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004958
4959 /* Stop if priority has changed */
4960 if (skb->priority < priority)
4961 break;
4962
4963 skb = skb_dequeue(&chan->data_q);
4964
4965 blocks = __get_blocks(hdev, skb);
4966 if (blocks > hdev->block_cnt)
4967 return;
4968
4969 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004970 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004971
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004972 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004973 hdev->acl_last_tx = jiffies;
4974
4975 hdev->block_cnt -= blocks;
4976 quote -= blocks;
4977
4978 chan->sent += blocks;
4979 chan->conn->sent += blocks;
4980 }
4981 }
4982
4983 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004984 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004985}
4986
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004987static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004988{
4989 BT_DBG("%s", hdev->name);
4990
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004991 /* No ACL link over BR/EDR controller */
4992 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4993 return;
4994
4995 /* No AMP link over AMP controller */
4996 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004997 return;
4998
4999 switch (hdev->flow_ctl_mode) {
5000 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5001 hci_sched_acl_pkt(hdev);
5002 break;
5003
5004 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5005 hci_sched_acl_blk(hdev);
5006 break;
5007 }
5008}
5009
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005011static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005012{
5013 struct hci_conn *conn;
5014 struct sk_buff *skb;
5015 int quote;
5016
5017 BT_DBG("%s", hdev->name);
5018
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005019 if (!hci_conn_num(hdev, SCO_LINK))
5020 return;
5021
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5023 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5024 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005025 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026
5027 conn->sent++;
5028 if (conn->sent == ~0)
5029 conn->sent = 0;
5030 }
5031 }
5032}
5033
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005034static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005035{
5036 struct hci_conn *conn;
5037 struct sk_buff *skb;
5038 int quote;
5039
5040 BT_DBG("%s", hdev->name);
5041
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005042 if (!hci_conn_num(hdev, ESCO_LINK))
5043 return;
5044
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005045 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5046 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005047 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5048 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005049 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005050
5051 conn->sent++;
5052 if (conn->sent == ~0)
5053 conn->sent = 0;
5054 }
5055 }
5056}
5057
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005058static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005059{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005060 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005061 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005062 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005063
5064 BT_DBG("%s", hdev->name);
5065
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005066 if (!hci_conn_num(hdev, LE_LINK))
5067 return;
5068
Marcel Holtmann4a964402014-07-02 19:10:33 +02005069 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005070 /* LE tx timeout must be longer than maximum
5071 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005072 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005073 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005074 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005075 }
5076
5077 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005078 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005079 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005080 u32 priority = (skb_peek(&chan->data_q))->priority;
5081 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005082 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005083 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005084
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005085 /* Stop if priority has changed */
5086 if (skb->priority < priority)
5087 break;
5088
5089 skb = skb_dequeue(&chan->data_q);
5090
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005091 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005092 hdev->le_last_tx = jiffies;
5093
5094 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005095 chan->sent++;
5096 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005097 }
5098 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005099
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005100 if (hdev->le_pkts)
5101 hdev->le_cnt = cnt;
5102 else
5103 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005104
5105 if (cnt != tmp)
5106 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005107}
5108
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005109static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005111 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112 struct sk_buff *skb;
5113
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005114 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005115 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116
Marcel Holtmann52de5992013-09-03 18:08:38 -07005117 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5118 /* Schedule queues and send stuff to HCI driver */
5119 hci_sched_acl(hdev);
5120 hci_sched_sco(hdev);
5121 hci_sched_esco(hdev);
5122 hci_sched_le(hdev);
5123 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005124
Linus Torvalds1da177e2005-04-16 15:20:36 -07005125 /* Send next queued raw (unknown type) packet */
5126 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005127 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128}
5129
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005130/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005131
5132/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005133static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005134{
5135 struct hci_acl_hdr *hdr = (void *) skb->data;
5136 struct hci_conn *conn;
5137 __u16 handle, flags;
5138
5139 skb_pull(skb, HCI_ACL_HDR_SIZE);
5140
5141 handle = __le16_to_cpu(hdr->handle);
5142 flags = hci_flags(handle);
5143 handle = hci_handle(handle);
5144
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005145 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005146 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147
5148 hdev->stat.acl_rx++;
5149
5150 hci_dev_lock(hdev);
5151 conn = hci_conn_hash_lookup_handle(hdev, handle);
5152 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005153
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005155 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005156
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005158 l2cap_recv_acldata(conn, skb, flags);
5159 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005160 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005161 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005162 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163 }
5164
5165 kfree_skb(skb);
5166}
5167
5168/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005169static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005170{
5171 struct hci_sco_hdr *hdr = (void *) skb->data;
5172 struct hci_conn *conn;
5173 __u16 handle;
5174
5175 skb_pull(skb, HCI_SCO_HDR_SIZE);
5176
5177 handle = __le16_to_cpu(hdr->handle);
5178
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005179 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180
5181 hdev->stat.sco_rx++;
5182
5183 hci_dev_lock(hdev);
5184 conn = hci_conn_hash_lookup_handle(hdev, handle);
5185 hci_dev_unlock(hdev);
5186
5187 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005188 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005189 sco_recv_scodata(conn, skb);
5190 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005192 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005193 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194 }
5195
5196 kfree_skb(skb);
5197}
5198
Johan Hedberg9238f362013-03-05 20:37:48 +02005199static bool hci_req_is_complete(struct hci_dev *hdev)
5200{
5201 struct sk_buff *skb;
5202
5203 skb = skb_peek(&hdev->cmd_q);
5204 if (!skb)
5205 return true;
5206
5207 return bt_cb(skb)->req.start;
5208}
5209
Johan Hedberg42c6b122013-03-05 20:37:49 +02005210static void hci_resend_last(struct hci_dev *hdev)
5211{
5212 struct hci_command_hdr *sent;
5213 struct sk_buff *skb;
5214 u16 opcode;
5215
5216 if (!hdev->sent_cmd)
5217 return;
5218
5219 sent = (void *) hdev->sent_cmd->data;
5220 opcode = __le16_to_cpu(sent->opcode);
5221 if (opcode == HCI_OP_RESET)
5222 return;
5223
5224 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5225 if (!skb)
5226 return;
5227
5228 skb_queue_head(&hdev->cmd_q, skb);
5229 queue_work(hdev->workqueue, &hdev->cmd_work);
5230}
5231
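/* Called from event processing when a command status or command
 * complete event arrives. On failure, or when the matched command was
 * the last one of its request, any remaining queued commands of that
 * request are dropped and the request's complete callback is invoked
 * exactly once.
 */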
Johan Hedberg9238f362013-03-05 20:37:48 +02005232void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5233{
5234 hci_req_complete_t req_complete = NULL;
5235 struct sk_buff *skb;
5236 unsigned long flags;
5237
5238 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5239
Johan Hedberg42c6b122013-03-05 20:37:49 +02005240 /* If the completed command doesn't match the last one that was
5241 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005242 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005243 if (!hci_sent_cmd_data(hdev, opcode)) {
5244 /* Some CSR based controllers generate a spontaneous
5245 * reset complete event during init and any pending
5246 * command will never be completed. In such a case we
5247 * need to resend whatever was the last sent
5248 * command.
5249 */
5250 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5251 hci_resend_last(hdev);
5252
Johan Hedberg9238f362013-03-05 20:37:48 +02005253 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005254 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005255
5256	/* If the command succeeded and there are still more commands in
5257 * this request the request is not yet complete.
5258 */
5259 if (!status && !hci_req_is_complete(hdev))
5260 return;
5261
5262 /* If this was the last command in a request the complete
5263 * callback would be found in hdev->sent_cmd instead of the
5264 * command queue (hdev->cmd_q).
5265 */
5266 if (hdev->sent_cmd) {
5267 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005268
5269 if (req_complete) {
5270 /* We must set the complete callback to NULL to
5271 * avoid calling the callback more than once if
5272 * this function gets called again.
5273 */
5274 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5275
Johan Hedberg9238f362013-03-05 20:37:48 +02005276 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005277 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005278 }
5279
5280 /* Remove all pending commands belonging to this request */
5281 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5282 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5283 if (bt_cb(skb)->req.start) {
5284 __skb_queue_head(&hdev->cmd_q, skb);
5285 break;
5286 }
5287
5288 req_complete = bt_cb(skb)->req.complete;
5289 kfree_skb(skb);
5290 }
5291 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5292
5293call_complete:
5294 if (req_complete)
5295 req_complete(hdev, status);
5296}
5297
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005298static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005300 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005301 struct sk_buff *skb;
5302
5303 BT_DBG("%s", hdev->name);
5304
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005306 /* Send copy to monitor */
5307 hci_send_to_monitor(hdev, skb);
5308
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309 if (atomic_read(&hdev->promisc)) {
5310 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005311 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005312 }
5313
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005314 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315 kfree_skb(skb);
5316 continue;
5317 }
5318
5319 if (test_bit(HCI_INIT, &hdev->flags)) {
5320			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005321 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322 case HCI_ACLDATA_PKT:
5323 case HCI_SCODATA_PKT:
5324 kfree_skb(skb);
5325 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005326 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327 }
5328
5329 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005330 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005331 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005332 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333 hci_event_packet(hdev, skb);
5334 break;
5335
5336 case HCI_ACLDATA_PKT:
5337 BT_DBG("%s ACL data packet", hdev->name);
5338 hci_acldata_packet(hdev, skb);
5339 break;
5340
5341 case HCI_SCODATA_PKT:
5342 BT_DBG("%s SCO data packet", hdev->name);
5343 hci_scodata_packet(hdev, skb);
5344 break;
5345
5346 default:
5347 kfree_skb(skb);
5348 break;
5349 }
5350 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005351}
5352
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005353static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005354{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005355 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005356 struct sk_buff *skb;
5357
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005358 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5359 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005360
Linus Torvalds1da177e2005-04-16 15:20:36 -07005361 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005362 if (atomic_read(&hdev->cmd_cnt)) {
5363 skb = skb_dequeue(&hdev->cmd_q);
5364 if (!skb)
5365 return;
5366
Wei Yongjun7585b972009-02-25 18:29:52 +08005367 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005368
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005369 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005370 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005371 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005372 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005373 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005374 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005375 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005376 schedule_delayed_work(&hdev->cmd_timer,
5377 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005378 } else {
5379 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005380 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005381 }
5382 }
5383}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005384
5385void hci_req_add_le_scan_disable(struct hci_request *req)
5386{
5387 struct hci_cp_le_set_scan_enable cp;
5388
5389 memset(&cp, 0, sizeof(cp));
5390 cp.enable = LE_SCAN_DISABLE;
5391 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5392}
Andre Guedesa4790db2014-02-26 20:21:47 -03005393
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005394void hci_req_add_le_passive_scan(struct hci_request *req)
5395{
5396 struct hci_cp_le_set_scan_param param_cp;
5397 struct hci_cp_le_set_scan_enable enable_cp;
5398 struct hci_dev *hdev = req->hdev;
5399 u8 own_addr_type;
5400
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005401	/* Set require_privacy to false since no SCAN_REQ are sent
5402 * during passive scanning. Not using an unresolvable address
5403 * here is important so that peer devices using direct
5404 * advertising with our address will be correctly reported
5405 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005406 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005407 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005408 return;
5409
5410 memset(&param_cp, 0, sizeof(param_cp));
5411 param_cp.type = LE_SCAN_PASSIVE;
5412 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5413 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5414 param_cp.own_address_type = own_addr_type;
5415 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5416 &param_cp);
5417
5418 memset(&enable_cp, 0, sizeof(enable_cp));
5419 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005420 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005421 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5422 &enable_cp);
5423}
5424
Andre Guedesa4790db2014-02-26 20:21:47 -03005425static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5426{
5427 if (status)
5428 BT_DBG("HCI request failed to update background scanning: "
5429 "status 0x%2.2x", status);
5430}
5431
5432/* This function controls the background scanning based on hdev->pend_le_conns
5433 * list. If there are pending LE connections we start the background scanning,
5434 * otherwise we stop it.
5435 *
5436 * This function requires the caller holds hdev->lock.
5437 */
5438void hci_update_background_scan(struct hci_dev *hdev)
5439{
Andre Guedesa4790db2014-02-26 20:21:47 -03005440 struct hci_request req;
5441 struct hci_conn *conn;
5442 int err;
5443
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005444 if (!test_bit(HCI_UP, &hdev->flags) ||
5445 test_bit(HCI_INIT, &hdev->flags) ||
5446 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02005447 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005448 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005449 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005450 return;
5451
Johan Hedberga70f4b52014-07-07 15:19:50 +03005452 /* No point in doing scanning if LE support hasn't been enabled */
5453 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5454 return;
5455
Johan Hedbergae23ada2014-07-07 13:24:59 +03005456 /* If discovery is active don't interfere with it */
5457 if (hdev->discovery.state != DISCOVERY_STOPPED)
5458 return;
5459
Andre Guedesa4790db2014-02-26 20:21:47 -03005460 hci_req_init(&req, hdev);
5461
Johan Hedberg2b7be332014-07-07 14:40:22 +03005462 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5463 list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005464 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005465		/* If there are no pending LE connections or devices
5466 * to be scanned for, we should stop the background
5467 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005468 */
5469
5470 /* If controller is not scanning we are done. */
5471 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5472 return;
5473
5474 hci_req_add_le_scan_disable(&req);
5475
5476 BT_DBG("%s stopping background scanning", hdev->name);
5477 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005478 /* If there is at least one pending LE connection, we should
5479 * keep the background scan running.
5480 */
5481
Andre Guedesa4790db2014-02-26 20:21:47 -03005482 /* If controller is connecting, we should not start scanning
5483 * since some controllers are not able to scan and connect at
5484 * the same time.
5485 */
5486 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5487 if (conn)
5488 return;
5489
Andre Guedes4340a122014-03-10 18:26:24 -03005490 /* If controller is currently scanning, we stop it to ensure we
5491 * don't miss any advertising (due to duplicates filter).
5492 */
5493 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5494 hci_req_add_le_scan_disable(&req);
5495
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005496 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005497
5498 BT_DBG("%s starting background scanning", hdev->name);
5499 }
5500
5501 err = hci_req_run(&req, update_background_scan_complete);
5502 if (err)
5503 BT_ERR("Failed to run HCI request: err %d", err);
5504}