/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

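/* Writing "Y" or "N" here toggles Device Under Test mode while the
 * controller is up. Enabling sends HCI_OP_ENABLE_DUT_MODE; since there
 * is no dedicated disable command, disabling resets the controller.
 */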
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

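/* Dump every local features page from 0 up to max_page and, if the
 * controller is LE capable, the LE feature bits as well.
 */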
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

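/* The idle timeout is expressed in milliseconds; 0 disables it, and any
 * other value must fall between 500 msec and 3600000 msec (one hour).
 */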
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

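/* Sniff intervals are expressed in baseband slots (0.625 ms each) and
 * must be even, with the minimum never exceeding the maximum.
 */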
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

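/* The connection information ages bound, in milliseconds, how long
 * cached per-connection values such as RSSI and TX power are treated
 * as fresh before being queried from the controller again.
 */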
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

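/* Forcing use of the static address is only allowed while the
 * controller is down; writes while HCI_UP is set fail with -EBUSY.
 */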
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

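/* Dump the stored LE long term keys: address and address type, the
 * authenticated flag, key type, encryption size, EDIV, Rand value and
 * the 128-bit key itself.
 */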
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

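/* LE connection intervals are in units of 1.25 ms. The valid range is
 * 0x0006 to 0x0c80 (7.5 ms to 4 s), and the minimum may never exceed
 * the maximum.
 */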
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

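/* The LE connection latency is the number of connection events a slave
 * may skip; values from 0x0000 to 0x01f3 (499 events) are allowed.
 */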
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

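/* The LE supervision timeout is in units of 10 ms; the valid range is
 * 0x000a to 0x0c80 (100 ms to 32 s).
 */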
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

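/* The advertising channel map is a bitmask of the three LE advertising
 * channels: bit 0 is channel 37, bit 1 channel 38 and bit 2 channel 39.
 * At least one bit must be set.
 */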
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

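/* Send an HCI command and wait synchronously for the matching event
 * (or the command complete event when event is 0), returning the event
 * skb or an ERR_PTR. Callers such as dut_mode_write() above serialize
 * these requests via hci_req_lock().
 */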
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

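/* Pick the inquiry result mode: 0x02 for extended results, 0x01 for
 * results with RSSI, 0x00 for the standard mode. The manufacturer and
 * revision checks below cover controllers that handle results with
 * RSSI without setting the corresponding feature bit.
 */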
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However, some controllers list
		 * max_page as 0 as long as SSP has not been enabled. To
		 * achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

Johan Hedbergd62e6d62013-09-13 11:40:02 +03001578static void hci_set_event_mask_page_2(struct hci_request *req)
1579{
1580 struct hci_dev *hdev = req->hdev;
1581 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1582
1583 /* If Connectionless Slave Broadcast master role is supported
1584 * enable all necessary events for it.
1585 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001586 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001587 events[1] |= 0x40; /* Triggered Clock Capture */
1588 events[1] |= 0x80; /* Synchronization Train Complete */
1589 events[2] |= 0x10; /* Slave Page Response Timeout */
1590 events[2] |= 0x20; /* CSB Channel Map Change */
1591 }
1592
1593 /* If Connectionless Slave Broadcast slave role is supported
1594 * enable all necessary events for it.
1595 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001596 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001597 events[2] |= 0x01; /* Synchronization Train Received */
1598 events[2] |= 0x02; /* CSB Receive */
1599 events[2] |= 0x04; /* CSB Timeout */
1600 events[2] |= 0x08; /* Truncated Page Complete */
1601 }
1602
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001603 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001604 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001605 events[2] |= 0x80;
1606
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001607 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1608}
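
/* Illustrative note (added commentary, not from the original sources):
 * the page 2 event mask is a little-endian 64-bit bit field transmitted
 * as an 8 byte array, so "bit N" of the mask lives in events[N / 8] as
 * BIT(N % 8). For example, the Synchronization Train Received event
 * above is mask bit 16, which is why the helper sets events[2] |= 0x01.
 */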

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
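
/* Illustrative sketch (hypothetical helper, not a kernel API): the
 * supported commands bit mask returned by Read Local Supported Commands
 * is indexed as octet/bit, so the checks above could be written as:
 *
 *	static bool hci_cmd_supported(struct hci_dev *hdev, int octet,
 *				      int bit)
 *	{
 *		return hdev->commands[octet] & BIT(bit);
 *	}
 *
 * With that, "hdev->commands[6] & 0x80" is octet 6 bit 7 (Delete Stored
 * Link Key) and "hdev->commands[5] & 0x10" is octet 5 bit 4 (Write
 * Default Link Policy Settings).
 */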

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first stage of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
			    &whitelist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}
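
/* Illustrative note (added commentary; the exact contents of the earlier
 * stages live in hci_init1_req/hci_init2_req above): controller bring-up
 * is a pipeline of synchronous request batches. Stage 1 runs for every
 * controller type, while stages 2 to 4 (BR/EDR setup, event masks and
 * link policy, page 2 events and Secure Connections) only run for
 * HCI_BREDR devices. Each __hci_req_sync() call blocks until all
 * commands queued with hci_req_add() in that stage have completed or
 * HCI_INIT_TIMEOUT expires.
 */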

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
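
/* Illustrative sketch (hypothetical caller, not part of the original
 * file): every successful hci_dev_get() must be balanced by a matching
 * hci_dev_put() once the caller is done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("%s flags 0x%lx", hdev->name, hdev->flags);
 *		hci_dev_put(hdev);
 *	}
 */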

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
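
/* Illustrative note (added commentary, not from the original sources):
 * the returned bit mask feeds into the mgmt Device Found event. A caller
 * might act on it roughly like this:
 *
 *	u32 flags = hci_inquiry_cache_update(hdev, &data, false);
 *
 *	if (flags & MGMT_DEV_FOUND_CONFIRM_NAME)
 *		;	// name unknown, userspace should resolve it
 *	if (flags & MGMT_DEV_FOUND_LEGACY_PAIRING)
 *		;	// remote lacks SSP, legacy PIN pairing applies
 */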

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 * Take the done path so the device reference obtained by
		 * hci_dev_get() above is dropped.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
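
/* Illustrative userspace sketch (hypothetical snippet; assumes the
 * standard BlueZ library definitions): the HCIINQUIRY ioctl handled
 * above is issued on an HCI socket with the request header immediately
 * followed by room for the responses:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },  // GIAC
 *			.length  = 8,
 *			.num_rsp = 255,
 *		},
 *	};
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 *
 * On return, ir.num_rsp holds the number of inquiry_info entries that
 * were copied back from the inquiry cache.
 */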

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers that are not using the management interface
	 * and are brought up using the legacy ioctl, set the HCI_PAIRABLE
	 * bit so that pairing works for them. Once the management
	 * interface is in use this bit will be cleared again and
	 * userspace has to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list)
		list_del_init(&p->action);

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
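
/* Illustrative note (added commentary, not from the original sources):
 * the scan parameter is the Write Scan Enable value, so SCAN_PAGE (0x02)
 * tracks the mgmt "connectable" setting and SCAN_INQUIRY (0x01) the
 * "discoverable" one. An HCISETSCAN ioctl with dev_opt set to
 * SCAN_PAGE | SCAN_INQUIRY therefore marks the device as both
 * connectable and discoverable.
 */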

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2830
2831int hci_get_dev_list(void __user *arg)
2832{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002833 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834 struct hci_dev_list_req *dl;
2835 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 int n = 0, size, err;
2837 __u16 dev_num;
2838
2839 if (get_user(dev_num, (__u16 __user *) arg))
2840 return -EFAULT;
2841
2842 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2843 return -EINVAL;
2844
2845 size = sizeof(*dl) + dev_num * sizeof(*dr);
2846
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002847 dl = kzalloc(size, GFP_KERNEL);
2848 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849 return -ENOMEM;
2850
2851 dr = dl->dev_req;
2852
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002853 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002854 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002855 unsigned long flags = hdev->flags;
2856
2857 /* When the auto-off is configured it means the transport
2858 * is running, but in that case still indicate that the
2859 * device is actually down.
2860 */
2861 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2862 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002863
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002865 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002866
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 if (++n >= dev_num)
2868 break;
2869 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002870 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871
2872 dl->dev_num = n;
2873 size = sizeof(*dl) + n * sizeof(*dr);
2874
2875 err = copy_to_user(arg, dl, size);
2876 kfree(dl);
2877
2878 return err ? -EFAULT : 0;
2879}
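/* Minimal userspace sketch (illustrative only, error handling omitted)
 * of exercising this handler through the HCIGETDEVLIST ioctl on a raw
 * HCI socket:
 *
 *	struct hci_dev_list_req *dl;
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *		... dl->dev_num now holds the number of filled entries ...
 */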
2880
2881int hci_get_dev_info(void __user *arg)
2882{
2883 struct hci_dev *hdev;
2884 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002885 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886 int err = 0;
2887
2888 if (copy_from_user(&di, arg, sizeof(di)))
2889 return -EFAULT;
2890
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002891 hdev = hci_dev_get(di.dev_id);
2892 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 return -ENODEV;
2894
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002895 /* When the auto-off is configured it means the transport
2896 * is running, but in that case still indicate that the
2897 * device is actually down.
2898 */
2899 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2900 flags = hdev->flags & ~BIT(HCI_UP);
2901 else
2902 flags = hdev->flags;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002903
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 strcpy(di.name, hdev->name);
2905 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002906 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002907 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002909 if (lmp_bredr_capable(hdev)) {
2910 di.acl_mtu = hdev->acl_mtu;
2911 di.acl_pkts = hdev->acl_pkts;
2912 di.sco_mtu = hdev->sco_mtu;
2913 di.sco_pkts = hdev->sco_pkts;
2914 } else {
2915 di.acl_mtu = hdev->le_mtu;
2916 di.acl_pkts = hdev->le_pkts;
2917 di.sco_mtu = 0;
2918 di.sco_pkts = 0;
2919 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920 di.link_policy = hdev->link_policy;
2921 di.link_mode = hdev->link_mode;
2922
2923 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2924 memcpy(&di.features, &hdev->features, sizeof(di.features));
2925
2926 if (copy_to_user(arg, &di, sizeof(di)))
2927 err = -EFAULT;
2928
2929 hci_dev_put(hdev);
2930
2931 return err;
2932}
2933
2934/* ---- Interface to HCI drivers ---- */
2935
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002936static int hci_rfkill_set_block(void *data, bool blocked)
2937{
2938 struct hci_dev *hdev = data;
2939
2940 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2941
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002942 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2943 return -EBUSY;
2944
Johan Hedberg5e130362013-09-13 08:58:17 +03002945 if (blocked) {
2946 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002947 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2948 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002949 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002950 } else {
2951 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002952 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002953
2954 return 0;
2955}
2956
2957static const struct rfkill_ops hci_rfkill_ops = {
2958 .set_block = hci_rfkill_set_block,
2959};
2960
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002961static void hci_power_on(struct work_struct *work)
2962{
2963 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002964 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002965
2966 BT_DBG("%s", hdev->name);
2967
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002968 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002969 if (err < 0) {
2970 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002971 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002972 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002973
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002974 /* During the HCI setup phase, a few error conditions are
2975 * ignored and they need to be checked now. If they are still
2976 * valid, it is important to turn the device back off.
2977 */
2978 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002979 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002980 (hdev->dev_type == HCI_BREDR &&
2981 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2982 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002983 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2984 hci_dev_do_close(hdev);
2985 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002986 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2987 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002988 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002989
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002990 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002991 /* For unconfigured devices, set the HCI_RAW flag
2992 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002993 */
2994 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2995 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002996
2997 /* For fully configured devices, this will send
2998 * the Index Added event. For unconfigured devices,
2999 * it will send the Unconfigured Index Added event.
3000 *
3001 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3002 * and no event will be sent.
3003 */
3004 mgmt_index_added(hdev);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003005 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02003006 /* Now that the controller is configured, it
3007 * is important to clear the HCI_RAW flag.
3008 */
3009 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3010 clear_bit(HCI_RAW, &hdev->flags);
3011
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003012 /* Powering on the controller with HCI_CONFIG set only
3013 * happens with the transition from unconfigured to
3014 * configured. This will send the Index Added event.
3015 */
3016 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003017 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003018}
3019
3020static void hci_power_off(struct work_struct *work)
3021{
Johan Hedberg32435532011-11-07 22:16:04 +02003022 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003023 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003024
3025 BT_DBG("%s", hdev->name);
3026
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003027 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003028}
3029
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003030static void hci_discov_off(struct work_struct *work)
3031{
3032 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003033
3034 hdev = container_of(work, struct hci_dev, discov_off.work);
3035
3036 BT_DBG("%s", hdev->name);
3037
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003038 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003039}
3040
Johan Hedberg35f74982014-02-18 17:14:32 +02003041void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003042{
Johan Hedberg48210022013-01-27 00:31:28 +02003043 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003044
Johan Hedberg48210022013-01-27 00:31:28 +02003045 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3046 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003047 kfree(uuid);
3048 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003049}
3050
Johan Hedberg35f74982014-02-18 17:14:32 +02003051void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003052{
3053 struct list_head *p, *n;
3054
3055 list_for_each_safe(p, n, &hdev->link_keys) {
3056 struct link_key *key;
3057
3058 key = list_entry(p, struct link_key, list);
3059
3060 list_del(p);
3061 kfree(key);
3062 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003063}
3064
Johan Hedberg35f74982014-02-18 17:14:32 +02003065void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003066{
3067 struct smp_ltk *k, *tmp;
3068
3069 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3070 list_del(&k->list);
3071 kfree(k);
3072 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003073}
3074
Johan Hedberg970c4e42014-02-18 10:19:33 +02003075void hci_smp_irks_clear(struct hci_dev *hdev)
3076{
3077 struct smp_irk *k, *tmp;
3078
3079 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3080 list_del(&k->list);
3081 kfree(k);
3082 }
3083}
3084
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003085struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3086{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003087 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003088
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003089 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003090 if (bacmp(bdaddr, &k->bdaddr) == 0)
3091 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003092
3093 return NULL;
3094}
3095
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303096static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003097 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003098{
3099 /* Legacy key */
3100 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303101 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003102
3103 /* Debug keys are insecure so don't store them persistently */
3104 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303105 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003106
3107 /* Changed combination key and there's no previous one */
3108 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303109 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003110
3111 /* Security mode 3 case */
3112 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303113 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003114
3115 /* Neither the local nor the remote side requested no-bonding */
3116 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303117 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003118
3119 /* Local side had dedicated bonding as requirement */
3120 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303121 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003122
3123 /* Remote side had dedicated bonding as requirement */
3124 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303125 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003126
3127 /* If none of the above criteria match, then don't store the key
3128 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303129 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003130}
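/* Worked example of the rules above, using the core-spec
 * authentication requirement encoding (0x00/0x01 mean no-bonding,
 * 0x02/0x03 dedicated bonding, 0x04/0x05 general bonding): an
 * unauthenticated combination key created while both sides asked for
 * general bonding (auth_type and remote_auth both 0x04) is stored
 * persistently, while the same key created with no-bonding on both
 * ends (0x00) matches none of the criteria and is kept only for the
 * lifetime of the connection.
 */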
3131
Johan Hedberge804d252014-07-16 11:42:28 +03003132static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003133{
Johan Hedberge804d252014-07-16 11:42:28 +03003134 if (type == SMP_LTK)
3135 return HCI_ROLE_MASTER;
3136
3137 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003138}
3139
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003140struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003141 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003142{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003143 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003144
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003145 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003146 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003147 continue;
3148
Johan Hedberge804d252014-07-16 11:42:28 +03003149 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003150 continue;
3151
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003152 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003153 }
3154
3155 return NULL;
3156}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003157
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003158struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003159 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003160{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003161 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003162
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003163 list_for_each_entry(k, &hdev->long_term_keys, list)
3164 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003165 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberge804d252014-07-16 11:42:28 +03003166 ltk_role(k->type) == role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003167 return k;
3168
3169 return NULL;
3170}
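/* Illustrative lookup (hypothetical variables): the LTK used when
 * initiating encryption in the master role to a peer with a static
 * random address would be found with
 *
 *	k = hci_find_ltk_by_addr(hdev, &peer_addr, ADDR_LE_DEV_RANDOM,
 *				 HCI_ROLE_MASTER);
 */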
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003171
Johan Hedberg970c4e42014-02-18 10:19:33 +02003172struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3173{
3174 struct smp_irk *irk;
3175
3176 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3177 if (!bacmp(&irk->rpa, rpa))
3178 return irk;
3179 }
3180
3181 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3182 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3183 bacpy(&irk->rpa, rpa);
3184 return irk;
3185 }
3186 }
3187
3188 return NULL;
3189}
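/* The two passes above form a cheap-then-expensive lookup: a hit on
 * the cached RPA avoids the AES operation inside smp_irk_matches(),
 * and a successful crypto match refreshes the cache via bacpy().
 * Illustrative use when resolving an advertiser address (hypothetical
 * names):
 *
 *	irk = hci_find_irk_by_rpa(hdev, &adv_addr);
 *	if (irk)
 *		... peer identity is irk->bdaddr / irk->addr_type ...
 */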
3190
3191struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3192 u8 addr_type)
3193{
3194 struct smp_irk *irk;
3195
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003196 /* Identity Address must be public or static random */
3197 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3198 return NULL;
3199
Johan Hedberg970c4e42014-02-18 10:19:33 +02003200 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3201 if (addr_type == irk->addr_type &&
3202 bacmp(bdaddr, &irk->bdaddr) == 0)
3203 return irk;
3204 }
3205
3206 return NULL;
3207}
3208
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003209struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003210 bdaddr_t *bdaddr, u8 *val, u8 type,
3211 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003212{
3213 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303214 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003215
3216 old_key = hci_find_link_key(hdev, bdaddr);
3217 if (old_key) {
3218 old_key_type = old_key->type;
3219 key = old_key;
3220 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003221 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003222 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003223 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003224 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003225 list_add(&key->list, &hdev->link_keys);
3226 }
3227
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003228 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003229
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003230 /* Some buggy controller combinations generate a changed
3231 * combination key for legacy pairing even when there's no
3232 * previous key */
3233 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003234 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003235 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003236 if (conn)
3237 conn->key_type = type;
3238 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003239
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003240 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003241 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003242 key->pin_len = pin_len;
3243
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003244 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003245 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003246 else
3247 key->type = type;
3248
Johan Hedberg7652ff62014-06-24 13:15:49 +03003249 if (persistent)
3250 *persistent = hci_persistent_key(hdev, conn, type,
3251 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003252
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003253 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003254}
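/* Sketch of the typical call site, an HCI Link Key Notification event
 * handler (names illustrative):
 *
 *	bool persistent;
 *
 *	hci_dev_lock(hdev);
 *	hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			 ev->key_type, conn->pin_length, &persistent);
 *	hci_dev_unlock(hdev);
 */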
3255
Johan Hedbergca9142b2014-02-19 14:57:44 +02003256struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003257 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003258 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003259{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003260 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003261 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003262
Johan Hedberge804d252014-07-16 11:42:28 +03003263 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003264 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003265 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003266 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003267 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003268 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003269 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003270 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003271 }
3272
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003273 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003274 key->bdaddr_type = addr_type;
3275 memcpy(key->val, tk, sizeof(key->val));
3276 key->authenticated = authenticated;
3277 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003278 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003279 key->enc_size = enc_size;
3280 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003281
Johan Hedbergca9142b2014-02-19 14:57:44 +02003282 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003283}
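/* Sketch of storing an LTK received during SMP key distribution
 * (values illustrative; type SMP_LTK denotes a key the local host
 * will use in the master role, per ltk_role() above):
 *
 *	hci_add_ltk(hdev, &conn->dst, conn->dst_type, SMP_LTK,
 *		    authenticated, ltk, enc_size, ediv, rand);
 */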
3284
Johan Hedbergca9142b2014-02-19 14:57:44 +02003285struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3286 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003287{
3288 struct smp_irk *irk;
3289
3290 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3291 if (!irk) {
3292 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3293 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003294 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003295
3296 bacpy(&irk->bdaddr, bdaddr);
3297 irk->addr_type = addr_type;
3298
3299 list_add(&irk->list, &hdev->identity_resolving_keys);
3300 }
3301
3302 memcpy(irk->val, val, 16);
3303 bacpy(&irk->rpa, rpa);
3304
Johan Hedbergca9142b2014-02-19 14:57:44 +02003305 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003306}
3307
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003308int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3309{
3310 struct link_key *key;
3311
3312 key = hci_find_link_key(hdev, bdaddr);
3313 if (!key)
3314 return -ENOENT;
3315
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003316 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003317
3318 list_del(&key->list);
3319 kfree(key);
3320
3321 return 0;
3322}
3323
Johan Hedberge0b2b272014-02-18 17:14:31 +02003324int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003325{
3326 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003327 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003328
3329 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003330 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003331 continue;
3332
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003333 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003334
3335 list_del(&k->list);
3336 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003337 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003338 }
3339
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003340 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003341}
3342
Johan Hedberga7ec7332014-02-18 17:14:35 +02003343void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3344{
3345 struct smp_irk *k, *tmp;
3346
Johan Hedberg668b7b12014-02-21 16:03:31 +02003347 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003348 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3349 continue;
3350
3351 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3352
3353 list_del(&k->list);
3354 kfree(k);
3355 }
3356}
3357
Ville Tervo6bd32322011-02-16 16:32:41 +02003358/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003359static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003360{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003361 struct hci_dev *hdev = container_of(work, struct hci_dev,
3362 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003363
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003364 if (hdev->sent_cmd) {
3365 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3366 u16 opcode = __le16_to_cpu(sent->opcode);
3367
3368 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3369 } else {
3370 BT_ERR("%s command tx timeout", hdev->name);
3371 }
3372
Ville Tervo6bd32322011-02-16 16:32:41 +02003373 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003374 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003375}
3376
Szymon Janc2763eda2011-03-22 13:12:22 +01003377struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003378 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003379{
3380 struct oob_data *data;
3381
3382 list_for_each_entry(data, &hdev->remote_oob_data, list)
3383 if (bacmp(bdaddr, &data->bdaddr) == 0)
3384 return data;
3385
3386 return NULL;
3387}
3388
3389int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3390{
3391 struct oob_data *data;
3392
3393 data = hci_find_remote_oob_data(hdev, bdaddr);
3394 if (!data)
3395 return -ENOENT;
3396
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003397 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003398
3399 list_del(&data->list);
3400 kfree(data);
3401
3402 return 0;
3403}
3404
Johan Hedberg35f74982014-02-18 17:14:32 +02003405void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003406{
3407 struct oob_data *data, *n;
3408
3409 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3410 list_del(&data->list);
3411 kfree(data);
3412 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003413}
3414
Marcel Holtmann07988722014-01-10 02:07:29 -08003415int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3416 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003417{
3418 struct oob_data *data;
3419
3420 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003421 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003422 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003423 if (!data)
3424 return -ENOMEM;
3425
3426 bacpy(&data->bdaddr, bdaddr);
3427 list_add(&data->list, &hdev->remote_oob_data);
3428 }
3429
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003430 memcpy(data->hash192, hash, sizeof(data->hash192));
3431 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003432
Marcel Holtmann07988722014-01-10 02:07:29 -08003433 memset(data->hash256, 0, sizeof(data->hash256));
3434 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3435
3436 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3437
3438 return 0;
3439}
3440
3441int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3442 u8 *hash192, u8 *randomizer192,
3443 u8 *hash256, u8 *randomizer256)
3444{
3445 struct oob_data *data;
3446
3447 data = hci_find_remote_oob_data(hdev, bdaddr);
3448 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003449 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003450 if (!data)
3451 return -ENOMEM;
3452
3453 bacpy(&data->bdaddr, bdaddr);
3454 list_add(&data->list, &hdev->remote_oob_data);
3455 }
3456
3457 memcpy(data->hash192, hash192, sizeof(data->hash192));
3458 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3459
3460 memcpy(data->hash256, hash256, sizeof(data->hash256));
3461 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3462
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003463 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003464
3465 return 0;
3466}
3467
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003468struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003469 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003470{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003471 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003472
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003473 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003474 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003475 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003476 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003477
3478 return NULL;
3479}
3480
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003481void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003482{
3483 struct list_head *p, *n;
3484
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003485 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003486 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003487
3488 list_del(p);
3489 kfree(b);
3490 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003491}
3492
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003493int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003494{
3495 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003496
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003497 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003498 return -EBADF;
3499
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003500 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003501 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003502
Johan Hedberg27f70f32014-07-21 10:50:06 +03003503 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003504 if (!entry)
3505 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003506
3507 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003508 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003509
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003510 list_add(&entry->list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003511
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003512 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003513}
3514
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003515int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003516{
3517 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003518
Johan Hedberg35f74982014-02-18 17:14:32 +02003519 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003520 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003521 return 0;
3522 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003523
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003524 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003525 if (!entry)
3526 return -ENOENT;
3527
3528 list_del(&entry->list);
3529 kfree(entry);
3530
3531 return 0;
3532}
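/* These generic helpers back hdev->blacklist, hdev->whitelist and
 * hdev->le_white_list alike. Illustrative use for a BR/EDR peer:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr,
 *				  BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		... entry already present ...
 */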
3533
Andre Guedes15819a72014-02-03 13:56:18 -03003534/* This function requires the caller holds hdev->lock */
3535struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3536 bdaddr_t *addr, u8 addr_type)
3537{
3538 struct hci_conn_params *params;
3539
Johan Hedberg738f6182014-07-03 19:33:51 +03003540 /* The conn params list only contains identity addresses */
3541 if (!hci_is_identity_address(addr, addr_type))
3542 return NULL;
3543
Andre Guedes15819a72014-02-03 13:56:18 -03003544 list_for_each_entry(params, &hdev->le_conn_params, list) {
3545 if (bacmp(&params->addr, addr) == 0 &&
3546 params->addr_type == addr_type) {
3547 return params;
3548 }
3549 }
3550
3551 return NULL;
3552}
3553
Andre Guedescef952c2014-02-26 20:21:49 -03003554static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3555{
3556 struct hci_conn *conn;
3557
3558 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3559 if (!conn)
3560 return false;
3561
3562 if (conn->dst_type != type)
3563 return false;
3564
3565 if (conn->state != BT_CONNECTED)
3566 return false;
3567
3568 return true;
3569}
3570
Andre Guedes15819a72014-02-03 13:56:18 -03003571/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003572struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3573 bdaddr_t *addr, u8 addr_type)
Marcel Holtmann4b109662014-06-29 13:41:49 +02003574{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003575 struct hci_conn_params *param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003576
Johan Hedberg738f6182014-07-03 19:33:51 +03003577 /* The list only contains identity addresses */
3578 if (!hci_is_identity_address(addr, addr_type))
3579 return NULL;
3580
Johan Hedberg501f8822014-07-04 12:37:26 +03003581 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003582 if (bacmp(&param->addr, addr) == 0 &&
3583 param->addr_type == addr_type)
3584 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003585 }
3586
3587 return NULL;
3588}
3589
3590/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003591struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3592 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003593{
3594 struct hci_conn_params *params;
3595
Johan Hedbergc46245b2014-07-02 17:37:33 +03003596 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003597 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003598
3599 params = hci_conn_params_lookup(hdev, addr, addr_type);
3600 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003601 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003602
3603 params = kzalloc(sizeof(*params), GFP_KERNEL);
3604 if (!params) {
3605 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003606 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003607 }
3608
3609 bacpy(&params->addr, addr);
3610 params->addr_type = addr_type;
3611
3612 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003613 INIT_LIST_HEAD(&params->action);
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003614
3615 params->conn_min_interval = hdev->le_conn_min_interval;
3616 params->conn_max_interval = hdev->le_conn_max_interval;
3617 params->conn_latency = hdev->le_conn_latency;
3618 params->supervision_timeout = hdev->le_supv_timeout;
3619 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3620
3621 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3622
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003623 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003624}
3625
3626/* This function requires the caller holds hdev->lock */
3627int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003628 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003629{
3630 struct hci_conn_params *params;
3631
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003632 params = hci_conn_params_add(hdev, addr, addr_type);
3633 if (!params)
3634 return -EIO;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003635
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003636 if (params->auto_connect == auto_connect)
3637 return 0;
3638
Johan Hedberg95305ba2014-07-04 12:37:21 +03003639 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003640
Andre Guedescef952c2014-02-26 20:21:49 -03003641 switch (auto_connect) {
3642 case HCI_AUTO_CONN_DISABLED:
3643 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003644 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003645 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003646 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003647 list_add(&params->action, &hdev->pend_le_reports);
3648 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003649 break;
Marcel Holtmann4b9e7e72014-07-23 21:55:23 +02003650 case HCI_AUTO_CONN_DIRECT:
Andre Guedescef952c2014-02-26 20:21:49 -03003651 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003652 if (!is_connected(hdev, addr, addr_type)) {
3653 list_add(&params->action, &hdev->pend_le_conns);
3654 hci_update_background_scan(hdev);
3655 }
Andre Guedescef952c2014-02-26 20:21:49 -03003656 break;
3657 }
Andre Guedes15819a72014-02-03 13:56:18 -03003658
Johan Hedberg851efca2014-07-02 22:42:00 +03003659 params->auto_connect = auto_connect;
3660
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003661 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3662 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003663
3664 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003665}
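/* Sketch (illustrative) of asking for automatic reconnection to an LE
 * peer; this links the params onto hdev->pend_le_conns and kicks the
 * background scan:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 */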
3666
3667/* This function requires the caller holds hdev->lock */
3668void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3669{
3670 struct hci_conn_params *params;
3671
3672 params = hci_conn_params_lookup(hdev, addr, addr_type);
3673 if (!params)
3674 return;
3675
Johan Hedberg95305ba2014-07-04 12:37:21 +03003676 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003677 list_del(&params->list);
3678 kfree(params);
3679
Johan Hedberg95305ba2014-07-04 12:37:21 +03003680 hci_update_background_scan(hdev);
3681
Andre Guedes15819a72014-02-03 13:56:18 -03003682 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3683}
3684
3685/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003686void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3687{
3688 struct hci_conn_params *params, *tmp;
3689
3690 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3691 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3692 continue;
3693 list_del(&params->list);
3694 kfree(params);
3695 }
3696
3697 BT_DBG("All LE disabled connection parameters were removed");
3698}
3699
3700/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003701void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003702{
3703 struct hci_conn_params *params, *tmp;
3704
3705 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberga2f41a82014-07-04 12:37:19 +03003706 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003707 list_del(&params->list);
3708 kfree(params);
3709 }
3710
Johan Hedberga2f41a82014-07-04 12:37:19 +03003711 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003712
Andre Guedes15819a72014-02-03 13:56:18 -03003713 BT_DBG("All LE connection parameters were removed");
3714}
3715
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003716static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003717{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003718 if (status) {
3719 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003720
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003721 hci_dev_lock(hdev);
3722 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3723 hci_dev_unlock(hdev);
3724 return;
3725 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003726}
3727
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003728static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003729{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003730 /* General inquiry access code (GIAC) */
3731 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3732 struct hci_request req;
3733 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003734 int err;
3735
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003736 if (status) {
3737 BT_ERR("Failed to disable LE scanning: status %d", status);
3738 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003739 }
3740
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003741 switch (hdev->discovery.type) {
3742 case DISCOV_TYPE_LE:
3743 hci_dev_lock(hdev);
3744 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3745 hci_dev_unlock(hdev);
3746 break;
3747
3748 case DISCOV_TYPE_INTERLEAVED:
3749 hci_req_init(&req, hdev);
3750
3751 memset(&cp, 0, sizeof(cp));
3752 memcpy(&cp.lap, lap, sizeof(cp.lap));
3753 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3754 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3755
3756 hci_dev_lock(hdev);
3757
3758 hci_inquiry_cache_flush(hdev);
3759
3760 err = hci_req_run(&req, inquiry_complete);
3761 if (err) {
3762 BT_ERR("Inquiry request failed: err %d", err);
3763 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3764 }
3765
3766 hci_dev_unlock(hdev);
3767 break;
3768 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003769}
3770
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003771static void le_scan_disable_work(struct work_struct *work)
3772{
3773 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003774 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003775 struct hci_request req;
3776 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003777
3778 BT_DBG("%s", hdev->name);
3779
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003780 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003781
Andre Guedesb1efcc22014-02-26 20:21:40 -03003782 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003783
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003784 err = hci_req_run(&req, le_scan_disable_work_complete);
3785 if (err)
3786 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003787}
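/* le_scan_disable_work() above follows the asynchronous HCI request
 * pattern used throughout this file: build a request, queue one or
 * more commands, then run it with a completion callback.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, opcode, plen, param);
 *	err = hci_req_run(&req, complete_cb);
 */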
3788
Johan Hedberg8d972502014-02-28 12:54:14 +02003789static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3790{
3791 struct hci_dev *hdev = req->hdev;
3792
3793 /* If we're advertising or initiating an LE connection we can't
3794 * go ahead and change the random address at this time. This is
3795 * because the eventual initiator address used for the
3796 * subsequently created connection will be undefined (some
3797 * controllers use the new address and others the one we had
3798 * when the operation started).
3799 *
3800 * In this kind of scenario skip the update and let the random
3801 * address be updated at the next cycle.
3802 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003803 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003804 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3805 BT_DBG("Deferring random address update");
3806 return;
3807 }
3808
3809 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3810}
3811
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003812int hci_update_random_address(struct hci_request *req, bool require_privacy,
3813 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003814{
3815 struct hci_dev *hdev = req->hdev;
3816 int err;
3817
3818 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003819 * the current RPA has expired or something other than the
3820 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003821 */
3822 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003823 int to;
3824
3825 *own_addr_type = ADDR_LE_DEV_RANDOM;
3826
3827 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003828 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003829 return 0;
3830
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003831 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003832 if (err < 0) {
3833 BT_ERR("%s failed to generate new RPA", hdev->name);
3834 return err;
3835 }
3836
Johan Hedberg8d972502014-02-28 12:54:14 +02003837 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003838
3839 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3840 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3841
3842 return 0;
3843 }
3844
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003845 /* If privacy is required but no resolvable private address is
3846 * available, use an unresolvable private address. This is useful
3847 * for active scanning and non-connectable advertising.
3848 */
3849 if (require_privacy) {
3850 bdaddr_t urpa;
3851
3852 get_random_bytes(&urpa, 6);
3853 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3854
3855 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003856 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003857 return 0;
3858 }
3859
Johan Hedbergebd3a742014-02-23 19:42:21 +02003860 /* If forcing static address is in use or there is no public
3861 * address, use the static address as the random address (but
3862 * skip the HCI command if the current random address is already
3863 * the static one).
3864 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003865 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003866 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3867 *own_addr_type = ADDR_LE_DEV_RANDOM;
3868 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3869 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3870 &hdev->static_addr);
3871 return 0;
3872 }
3873
3874 /* Neither privacy nor static address is being used so use a
3875 * public address.
3876 */
3877 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3878
3879 return 0;
3880}
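/* Summary of the address selection above: a resolvable private
 * address when HCI_PRIVACY is set, a freshly generated unresolvable
 * address when only require_privacy is given, the static random
 * address when there is no public address (or static address use is
 * forced), and the public address otherwise. A caller building an
 * advertising or scan request would typically do:
 *
 *	err = hci_update_random_address(req, false, &own_addr_type);
 *	if (err < 0)
 *		return err;
 */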
3881
Johan Hedberga1f4c312014-02-27 14:05:41 +02003882/* Copy the Identity Address of the controller.
3883 *
3884 * If the controller has a public BD_ADDR, then by default use that one.
3885 * If this is a LE only controller without a public address, default to
3886 * the static random address.
3887 *
3888 * For debugging purposes it is possible to force controllers with a
3889 * public address to use the static random address instead.
3890 */
3891void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3892 u8 *bdaddr_type)
3893{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003894 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003895 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3896 bacpy(bdaddr, &hdev->static_addr);
3897 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3898 } else {
3899 bacpy(bdaddr, &hdev->bdaddr);
3900 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3901 }
3902}
3903
David Herrmann9be0dab2012-04-22 14:39:57 +02003904/* Alloc HCI device */
3905struct hci_dev *hci_alloc_dev(void)
3906{
3907 struct hci_dev *hdev;
3908
Johan Hedberg27f70f32014-07-21 10:50:06 +03003909 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003910 if (!hdev)
3911 return NULL;
3912
David Herrmannb1b813d2012-04-22 14:39:58 +02003913 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3914 hdev->esco_type = (ESCO_HV1);
3915 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003916 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3917 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003918 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003919 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3920 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003921
David Herrmannb1b813d2012-04-22 14:39:58 +02003922 hdev->sniff_max_interval = 800;
3923 hdev->sniff_min_interval = 80;
3924
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003925 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02003926 hdev->le_adv_min_interval = 0x0800;
3927 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003928 hdev->le_scan_interval = 0x0060;
3929 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003930 hdev->le_conn_min_interval = 0x0028;
3931 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003932 hdev->le_conn_latency = 0x0000;
3933 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003934
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003935 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003936 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003937 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3938 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003939
David Herrmannb1b813d2012-04-22 14:39:58 +02003940 mutex_init(&hdev->lock);
3941 mutex_init(&hdev->req_lock);
3942
3943 INIT_LIST_HEAD(&hdev->mgmt_pending);
3944 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003945 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003946 INIT_LIST_HEAD(&hdev->uuids);
3947 INIT_LIST_HEAD(&hdev->link_keys);
3948 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003949 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003950 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003951 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003952 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003953 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003954 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003955 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003956
3957 INIT_WORK(&hdev->rx_work, hci_rx_work);
3958 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3959 INIT_WORK(&hdev->tx_work, hci_tx_work);
3960 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003961
David Herrmannb1b813d2012-04-22 14:39:58 +02003962 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3963 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3964 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3965
David Herrmannb1b813d2012-04-22 14:39:58 +02003966 skb_queue_head_init(&hdev->rx_q);
3967 skb_queue_head_init(&hdev->cmd_q);
3968 skb_queue_head_init(&hdev->raw_q);
3969
3970 init_waitqueue_head(&hdev->req_wait_q);
3971
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003972 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003973
David Herrmannb1b813d2012-04-22 14:39:58 +02003974 hci_init_sysfs(hdev);
3975 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003976
3977 return hdev;
3978}
3979EXPORT_SYMBOL(hci_alloc_dev);
3980
3981/* Free HCI device */
3982void hci_free_dev(struct hci_dev *hdev)
3983{
David Herrmann9be0dab2012-04-22 14:39:57 +02003984 /* will free via device release */
3985 put_device(&hdev->dev);
3986}
3987EXPORT_SYMBOL(hci_free_dev);
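/* A minimal sketch of the driver-side lifecycle around these helpers
 * and hci_register_dev() below (hypothetical driver callbacks):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */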
3988
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989/* Register HCI device */
3990int hci_register_dev(struct hci_dev *hdev)
3991{
David Herrmannb1b813d2012-04-22 14:39:58 +02003992 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993
Marcel Holtmann74292d52014-07-06 15:50:27 +02003994 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995 return -EINVAL;
3996
Mat Martineau08add512011-11-02 16:18:36 -07003997 /* Do not allow HCI_AMP devices to register at index 0,
3998 * so the index can be used as the AMP controller ID.
3999 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004000 switch (hdev->dev_type) {
4001 case HCI_BREDR:
4002 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4003 break;
4004 case HCI_AMP:
4005 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4006 break;
4007 default:
4008 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004010
Sasha Levin3df92b32012-05-27 22:36:56 +02004011 if (id < 0)
4012 return id;
4013
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014 sprintf(hdev->name, "hci%d", id);
4015 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004016
4017 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4018
Kees Cookd8537542013-07-03 15:04:57 -07004019 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4020 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004021 if (!hdev->workqueue) {
4022 error = -ENOMEM;
4023 goto err;
4024 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004025
Kees Cookd8537542013-07-03 15:04:57 -07004026 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4027 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004028 if (!hdev->req_workqueue) {
4029 destroy_workqueue(hdev->workqueue);
4030 error = -ENOMEM;
4031 goto err;
4032 }
4033
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004034 if (!IS_ERR_OR_NULL(bt_debugfs))
4035 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4036
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004037 dev_set_name(&hdev->dev, "%s", hdev->name);
4038
Johan Hedberg99780a72014-02-18 10:40:07 +02004039 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4040 CRYPTO_ALG_ASYNC);
4041 if (IS_ERR(hdev->tfm_aes)) {
4042 BT_ERR("Unable to create crypto context");
4043 error = PTR_ERR(hdev->tfm_aes);
4044 hdev->tfm_aes = NULL;
4045 goto err_wqueue;
4046 }
4047
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004048 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004049 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02004050 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004052 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004053 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4054 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004055 if (hdev->rfkill) {
4056 if (rfkill_register(hdev->rfkill) < 0) {
4057 rfkill_destroy(hdev->rfkill);
4058 hdev->rfkill = NULL;
4059 }
4060 }
4061
Johan Hedberg5e130362013-09-13 08:58:17 +03004062 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4063 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4064
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004065 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004066 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004067
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004068 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004069 /* Assume BR/EDR support until proven otherwise (such as
4070 * through reading supported features during init.
4071 */
4072 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4073 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004074
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004075 write_lock(&hci_dev_list_lock);
4076 list_add(&hdev->list, &hci_dev_list);
4077 write_unlock(&hci_dev_list_lock);
4078
Marcel Holtmann4a964402014-07-02 19:10:33 +02004079 /* Devices that are marked for raw-only usage are unconfigured
4080 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004081 */
4082 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004083 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004084
Linus Torvalds1da177e2005-04-16 15:20:36 -07004085 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004086 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087
Johan Hedberg19202572013-01-14 22:33:51 +02004088 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004089
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004091
Johan Hedberg99780a72014-02-18 10:40:07 +02004092err_tfm:
4093 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02004094err_wqueue:
4095 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004096 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004097err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004098 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004099
David Herrmann33ca9542011-10-08 14:58:49 +02004100 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101}
4102EXPORT_SYMBOL(hci_register_dev);
4103
4104/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004105void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106{
Sasha Levin3df92b32012-05-27 22:36:56 +02004107 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004108
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004109 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110
Johan Hovold94324962012-03-15 14:48:41 +01004111 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4112
Sasha Levin3df92b32012-05-27 22:36:56 +02004113 id = hdev->id;
4114
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004115 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004117 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118
4119 hci_dev_do_close(hdev);
4120
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304121 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004122 kfree_skb(hdev->reassembly[i]);
4123
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004124 cancel_work_sync(&hdev->power_on);
4125
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004126 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02004127 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4128 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004129 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004130 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004131 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004132 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004133
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004134 /* mgmt_index_removed should take care of emptying the
4135 * pending list */
4136 BUG_ON(!list_empty(&hdev->mgmt_pending));
4137
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138 hci_notify(hdev, HCI_DEV_UNREG);
4139
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004140 if (hdev->rfkill) {
4141 rfkill_unregister(hdev->rfkill);
4142 rfkill_destroy(hdev->rfkill);
4143 }
4144
Johan Hedberg99780a72014-02-18 10:40:07 +02004145 if (hdev->tfm_aes)
4146 crypto_free_blkcipher(hdev->tfm_aes);
4147
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004148 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004149
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004150 debugfs_remove_recursive(hdev->debugfs);
4151
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004152 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004153 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004154
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004155 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004156 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004157 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004158 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004159 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004160 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004161 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004162 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004163 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004164 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004165 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004166
David Herrmanndc946bd2012-01-07 15:47:24 +01004167 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004168
4169 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170}
4171EXPORT_SYMBOL(hci_unregister_dev);
4172
4173/* Suspend HCI device */
4174int hci_suspend_dev(struct hci_dev *hdev)
4175{
4176 hci_notify(hdev, HCI_DEV_SUSPEND);
4177 return 0;
4178}
4179EXPORT_SYMBOL(hci_suspend_dev);
4180
4181/* Resume HCI device */
4182int hci_resume_dev(struct hci_dev *hdev)
4183{
4184 hci_notify(hdev, HCI_DEV_RESUME);
4185 return 0;
4186}
4187EXPORT_SYMBOL(hci_resume_dev);
4188
Marcel Holtmann76bca882009-11-18 00:40:39 +01004189/* Receive frame from HCI drivers */
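/* Frames are only accepted while the device is up or initializing; each
 * one is marked as incoming, timestamped, queued on hdev->rx_q and then
 * processed asynchronously from hci_rx_work().
 */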
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004190int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004191{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004192 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004193 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004194 kfree_skb(skb);
4195 return -ENXIO;
4196 }
4197
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004198 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004199 bt_cb(skb)->incoming = 1;
4200
4201 /* Time stamp */
4202 __net_timestamp(skb);
4203
Marcel Holtmann76bca882009-11-18 00:40:39 +01004204 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004205 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004206
Marcel Holtmann76bca882009-11-18 00:40:39 +01004207 return 0;
4208}
4209EXPORT_SYMBOL(hci_recv_frame);
4210
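/* Incremental reassembly for drivers that deliver HCI traffic in
 * arbitrarily sized chunks. The bytes still expected for the current
 * frame are tracked in the skb control block and refined once the
 * event/ACL/SCO header has been received. Returns how many input bytes
 * were left unconsumed, or a negative errno.
 */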
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304211static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004212 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304213{
4214 int len = 0;
4215 int hlen = 0;
4216 int remain = count;
4217 struct sk_buff *skb;
4218 struct bt_skb_cb *scb;
4219
4220 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004221 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304222 return -EILSEQ;
4223
4224 skb = hdev->reassembly[index];
4225
4226 if (!skb) {
4227 switch (type) {
4228 case HCI_ACLDATA_PKT:
4229 len = HCI_MAX_FRAME_SIZE;
4230 hlen = HCI_ACL_HDR_SIZE;
4231 break;
4232 case HCI_EVENT_PKT:
4233 len = HCI_MAX_EVENT_SIZE;
4234 hlen = HCI_EVENT_HDR_SIZE;
4235 break;
4236 case HCI_SCODATA_PKT:
4237 len = HCI_MAX_SCO_SIZE;
4238 hlen = HCI_SCO_HDR_SIZE;
4239 break;
4240 }
4241
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004242 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304243 if (!skb)
4244 return -ENOMEM;
4245
4246 scb = (void *) skb->cb;
4247 scb->expect = hlen;
4248 scb->pkt_type = type;
4249
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304250 hdev->reassembly[index] = skb;
4251 }
4252
4253 while (count) {
4254 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004255 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304256
4257 memcpy(skb_put(skb, len), data, len);
4258
4259 count -= len;
4260 data += len;
4261 scb->expect -= len;
4262 remain = count;
4263
4264 switch (type) {
4265 case HCI_EVENT_PKT:
4266 if (skb->len == HCI_EVENT_HDR_SIZE) {
4267 struct hci_event_hdr *h = hci_event_hdr(skb);
4268 scb->expect = h->plen;
4269
4270 if (skb_tailroom(skb) < scb->expect) {
4271 kfree_skb(skb);
4272 hdev->reassembly[index] = NULL;
4273 return -ENOMEM;
4274 }
4275 }
4276 break;
4277
4278 case HCI_ACLDATA_PKT:
4279 if (skb->len == HCI_ACL_HDR_SIZE) {
4280 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4281 scb->expect = __le16_to_cpu(h->dlen);
4282
4283 if (skb_tailroom(skb) < scb->expect) {
4284 kfree_skb(skb);
4285 hdev->reassembly[index] = NULL;
4286 return -ENOMEM;
4287 }
4288 }
4289 break;
4290
4291 case HCI_SCODATA_PKT:
4292 if (skb->len == HCI_SCO_HDR_SIZE) {
4293 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4294 scb->expect = h->dlen;
4295
4296 if (skb_tailroom(skb) < scb->expect) {
4297 kfree_skb(skb);
4298 hdev->reassembly[index] = NULL;
4299 return -ENOMEM;
4300 }
4301 }
4302 break;
4303 }
4304
4305 if (scb->expect == 0) {
4306 /* Complete frame */
4307
4308 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004309 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304310
4311 hdev->reassembly[index] = NULL;
4312 return remain;
4313 }
4314 }
4315
4316 return remain;
4317}
4318
Marcel Holtmannef222012007-07-11 06:42:04 +02004319int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4320{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304321 int rem = 0;
4322
Marcel Holtmannef222012007-07-11 06:42:04 +02004323 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4324 return -EILSEQ;
4325
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004326 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004327 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304328 if (rem < 0)
4329 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004330
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304331 data += (count - rem);
4332 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004333 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004334
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304335 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004336}
4337EXPORT_SYMBOL(hci_recv_fragment);
4338
Suraj Sumangala99811512010-07-14 13:02:19 +05304339#define STREAM_REASSEMBLY 0
4340
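/* Stream variant for pure byte-stream transports: the first octet of
 * every frame is the packet type indicator, which is peeled off here
 * and used to drive hci_reassembly() on a dedicated reassembly slot.
 */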
4341int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4342{
4343 int type;
4344 int rem = 0;
4345
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004346 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304347 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4348
4349 if (!skb) {
4350 struct { char type; } *pkt;
4351
4352 /* Start of the frame */
4353 pkt = data;
4354 type = pkt->type;
4355
4356 data++;
4357 count--;
4358 } else
4359 type = bt_cb(skb)->pkt_type;
4360
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004361 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004362 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304363 if (rem < 0)
4364 return rem;
4365
4366 data += (count - rem);
4367 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004368 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304369
4370 return rem;
4371}
4372EXPORT_SYMBOL(hci_recv_stream_fragment);
4373
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374/* ---- Interface to upper protocols ---- */
4375
Linus Torvalds1da177e2005-04-16 15:20:36 -07004376int hci_register_cb(struct hci_cb *cb)
4377{
4378 BT_DBG("%p name %s", cb, cb->name);
4379
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004380 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004382 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383
4384 return 0;
4385}
4386EXPORT_SYMBOL(hci_register_cb);
4387
4388int hci_unregister_cb(struct hci_cb *cb)
4389{
4390 BT_DBG("%p name %s", cb, cb->name);
4391
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004392 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004394 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004395
4396 return 0;
4397}
4398EXPORT_SYMBOL(hci_unregister_cb);
4399
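/* Hand one frame to the driver: timestamp it, mirror a copy to the
 * monitor socket (and to raw sockets while in promiscuous mode), orphan
 * the skb and call the driver's send callback, freeing the skb on error.
 */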
Marcel Holtmann51086992013-10-10 14:54:19 -07004400static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004402 int err;
4403
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004404 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004406 /* Time stamp */
4407 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004409 /* Send copy to monitor */
4410 hci_send_to_monitor(hdev, skb);
4411
4412 if (atomic_read(&hdev->promisc)) {
4413 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004414 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415 }
4416
4417 /* Get rid of skb owner, prior to sending to the driver. */
4418 skb_orphan(skb);
4419
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004420 err = hdev->send(hdev, skb);
4421 if (err < 0) {
4422 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4423 kfree_skb(skb);
4424 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425}
4426
Johan Hedberg3119ae92013-03-05 20:37:44 +02004427void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4428{
4429 skb_queue_head_init(&req->cmd_q);
4430 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004431 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004432}
4433
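/* Submit an accumulated request: the completion callback is attached to
 * the last queued command and the whole batch is spliced onto
 * hdev->cmd_q under the queue lock, so a request always goes out as one
 * contiguous run of commands.
 */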
4434int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4435{
4436 struct hci_dev *hdev = req->hdev;
4437 struct sk_buff *skb;
4438 unsigned long flags;
4439
4440 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4441
Andre Guedes5d73e032013-03-08 11:20:16 -03004442	/* If an error occurred during request building, remove all HCI
4443 * commands queued on the HCI request queue.
4444 */
4445 if (req->err) {
4446 skb_queue_purge(&req->cmd_q);
4447 return req->err;
4448 }
4449
Johan Hedberg3119ae92013-03-05 20:37:44 +02004450 /* Do not allow empty requests */
4451 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004452 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004453
4454 skb = skb_peek_tail(&req->cmd_q);
4455 bt_cb(skb)->req.complete = complete;
4456
4457 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4458 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4459 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4460
4461 queue_work(hdev->workqueue, &hdev->cmd_work);
4462
4463 return 0;
4464}
4465
Marcel Holtmann899de762014-07-11 05:51:58 +02004466bool hci_req_pending(struct hci_dev *hdev)
4467{
4468 return (hdev->req_status == HCI_REQ_PEND);
4469}
4470
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004471static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004472 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473{
4474 int len = HCI_COMMAND_HDR_SIZE + plen;
4475 struct hci_command_hdr *hdr;
4476 struct sk_buff *skb;
4477
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004479 if (!skb)
4480 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481
4482 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004483 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484 hdr->plen = plen;
4485
4486 if (plen)
4487 memcpy(skb_put(skb, plen), param, plen);
4488
4489 BT_DBG("skb len %d", skb->len);
4490
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004491 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004492
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004493 return skb;
4494}
4495
4496/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004497int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4498 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004499{
4500 struct sk_buff *skb;
4501
4502 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4503
4504 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4505 if (!skb) {
4506 BT_ERR("%s no memory for command", hdev->name);
4507 return -ENOMEM;
4508 }
4509
Johan Hedberg11714b32013-03-05 20:37:47 +02004510	/* Stand-alone HCI commands must be flagged as
4511 * single-command requests.
4512 */
4513 bt_cb(skb)->req.start = true;
4514
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004516 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517
4518 return 0;
4519}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520
Johan Hedberg71c76a12013-03-05 20:37:46 +02004521/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004522void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4523 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004524{
4525 struct hci_dev *hdev = req->hdev;
4526 struct sk_buff *skb;
4527
4528 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4529
Andre Guedes34739c12013-03-08 11:20:18 -03004530	/* If an error occurred during request building, there is no point in
4531 * queueing the HCI command. We can simply return.
4532 */
4533 if (req->err)
4534 return;
4535
Johan Hedberg71c76a12013-03-05 20:37:46 +02004536 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4537 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004538 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4539 hdev->name, opcode);
4540 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004541 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004542 }
4543
4544 if (skb_queue_empty(&req->cmd_q))
4545 bt_cb(skb)->req.start = true;
4546
Johan Hedberg02350a72013-04-03 21:50:29 +03004547 bt_cb(skb)->req.event = event;
4548
Johan Hedberg71c76a12013-03-05 20:37:46 +02004549 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004550}
4551
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004552void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4553 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004554{
4555 hci_req_add_ev(req, opcode, plen, param, 0);
4556}
4557
Linus Torvalds1da177e2005-04-16 15:20:36 -07004558/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004559void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560{
4561 struct hci_command_hdr *hdr;
4562
4563 if (!hdev->sent_cmd)
4564 return NULL;
4565
4566 hdr = (void *) hdev->sent_cmd->data;
4567
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004568 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569 return NULL;
4570
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004571 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004572
4573 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4574}
4575
4576/* Send ACL data */
4577static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4578{
4579 struct hci_acl_hdr *hdr;
4580 int len = skb->len;
4581
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004582 skb_push(skb, HCI_ACL_HDR_SIZE);
4583 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004584 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004585 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4586 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004587}
4588
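/* Queue one ACL frame, handling fragmentation: the head skb keeps the
 * caller's flags while every fragment on its frag_list is re-flagged as
 * a continuation (ACL_CONT), and all pieces are queued atomically so
 * the scheduler cannot interleave another frame between them.
 */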
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004589static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004590 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004592 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593 struct hci_dev *hdev = conn->hdev;
4594 struct sk_buff *list;
4595
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004596 skb->len = skb_headlen(skb);
4597 skb->data_len = 0;
4598
4599 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004600
4601 switch (hdev->dev_type) {
4602 case HCI_BREDR:
4603 hci_add_acl_hdr(skb, conn->handle, flags);
4604 break;
4605 case HCI_AMP:
4606 hci_add_acl_hdr(skb, chan->handle, flags);
4607 break;
4608 default:
4609 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4610 return;
4611 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004612
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004613 list = skb_shinfo(skb)->frag_list;
4614 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615		/* Non-fragmented */
4616 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4617
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004618 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004619 } else {
4620 /* Fragmented */
4621 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4622
4623 skb_shinfo(skb)->frag_list = NULL;
4624
4625 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004626 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004628 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004629
4630 flags &= ~ACL_START;
4631 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632 do {
4633 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004634
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004635 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004636 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637
4638 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4639
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004640 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641 } while (list);
4642
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004643 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004645}
4646
4647void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4648{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004649 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004650
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004651 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004652
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004653 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004655 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657
4658/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004659void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660{
4661 struct hci_dev *hdev = conn->hdev;
4662 struct hci_sco_hdr hdr;
4663
4664 BT_DBG("%s len %d", hdev->name, skb->len);
4665
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004666 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667 hdr.dlen = skb->len;
4668
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004669 skb_push(skb, HCI_SCO_HDR_SIZE);
4670 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004671 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004673 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004674
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004676 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004678
4679/* ---- HCI TX task (outgoing data) ---- */
4680
4681/* HCI Connection scheduler */
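/* Pick the connection of the given type with the fewest packets already
 * in flight, and grant it a quote: the free controller buffer count
 * divided evenly among all connections of that type with queued data.
 */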
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004682static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4683 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684{
4685 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004686 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004687 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004688
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004689 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004691
4692 rcu_read_lock();
4693
4694 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004695 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004697
4698 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4699 continue;
4700
Linus Torvalds1da177e2005-04-16 15:20:36 -07004701 num++;
4702
4703 if (c->sent < min) {
4704 min = c->sent;
4705 conn = c;
4706 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004707
4708 if (hci_conn_num(hdev, type) == num)
4709 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004710 }
4711
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004712 rcu_read_unlock();
4713
Linus Torvalds1da177e2005-04-16 15:20:36 -07004714 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004715 int cnt, q;
4716
4717 switch (conn->type) {
4718 case ACL_LINK:
4719 cnt = hdev->acl_cnt;
4720 break;
4721 case SCO_LINK:
4722 case ESCO_LINK:
4723 cnt = hdev->sco_cnt;
4724 break;
4725 case LE_LINK:
4726 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4727 break;
4728 default:
4729 cnt = 0;
4730 BT_ERR("Unknown link type");
4731 }
4732
4733 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734 *quote = q ? q : 1;
4735 } else
4736 *quote = 0;
4737
4738 BT_DBG("conn %p quote %d", conn, *quote);
4739 return conn;
4740}
4741
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004742static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743{
4744 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004745 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004746
Ville Tervobae1f5d92011-02-10 22:38:53 -03004747 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004749 rcu_read_lock();
4750
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004752 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004753 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004754 BT_ERR("%s killing stalled connection %pMR",
4755 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004756 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757 }
4758 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004759
4760 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004761}
4762
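/* Channel-level scheduler: only channels holding skbs at the highest
 * currently queued priority compete, and among those the channel on the
 * least busy connection wins the quote.
 */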
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004763static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4764 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004765{
4766 struct hci_conn_hash *h = &hdev->conn_hash;
4767 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004768 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004769 struct hci_conn *conn;
4770 int cnt, q, conn_num = 0;
4771
4772 BT_DBG("%s", hdev->name);
4773
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004774 rcu_read_lock();
4775
4776 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004777 struct hci_chan *tmp;
4778
4779 if (conn->type != type)
4780 continue;
4781
4782 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4783 continue;
4784
4785 conn_num++;
4786
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004787 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004788 struct sk_buff *skb;
4789
4790 if (skb_queue_empty(&tmp->data_q))
4791 continue;
4792
4793 skb = skb_peek(&tmp->data_q);
4794 if (skb->priority < cur_prio)
4795 continue;
4796
4797 if (skb->priority > cur_prio) {
4798 num = 0;
4799 min = ~0;
4800 cur_prio = skb->priority;
4801 }
4802
4803 num++;
4804
4805 if (conn->sent < min) {
4806 min = conn->sent;
4807 chan = tmp;
4808 }
4809 }
4810
4811 if (hci_conn_num(hdev, type) == conn_num)
4812 break;
4813 }
4814
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004815 rcu_read_unlock();
4816
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004817 if (!chan)
4818 return NULL;
4819
4820 switch (chan->conn->type) {
4821 case ACL_LINK:
4822 cnt = hdev->acl_cnt;
4823 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004824 case AMP_LINK:
4825 cnt = hdev->block_cnt;
4826 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004827 case SCO_LINK:
4828 case ESCO_LINK:
4829 cnt = hdev->sco_cnt;
4830 break;
4831 case LE_LINK:
4832 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4833 break;
4834 default:
4835 cnt = 0;
4836 BT_ERR("Unknown link type");
4837 }
4838
4839 q = cnt / num;
4840 *quote = q ? q : 1;
4841 BT_DBG("chan %p quote %d", chan, *quote);
4842 return chan;
4843}
4844
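/* Anti-starvation pass, run after a scheduling round that sent data:
 * any channel that did not transmit gets the skb at the head of its
 * queue promoted to HCI_PRIO_MAX - 1 so lower priority traffic cannot
 * be starved indefinitely.
 */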
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004845static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4846{
4847 struct hci_conn_hash *h = &hdev->conn_hash;
4848 struct hci_conn *conn;
4849 int num = 0;
4850
4851 BT_DBG("%s", hdev->name);
4852
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004853 rcu_read_lock();
4854
4855 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004856 struct hci_chan *chan;
4857
4858 if (conn->type != type)
4859 continue;
4860
4861 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4862 continue;
4863
4864 num++;
4865
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004866 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004867 struct sk_buff *skb;
4868
4869 if (chan->sent) {
4870 chan->sent = 0;
4871 continue;
4872 }
4873
4874 if (skb_queue_empty(&chan->data_q))
4875 continue;
4876
4877 skb = skb_peek(&chan->data_q);
4878 if (skb->priority >= HCI_PRIO_MAX - 1)
4879 continue;
4880
4881 skb->priority = HCI_PRIO_MAX - 1;
4882
4883 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004884 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004885 }
4886
4887 if (hci_conn_num(hdev, type) == num)
4888 break;
4889 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004890
4891 rcu_read_unlock();
4892
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004893}
4894
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004895static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4896{
4897 /* Calculate count of blocks used by this packet */
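	/* Illustrative numbers (assumed block_len of 64): a frame with a
	 * 4 byte ACL header plus 100 bytes of payload needs
	 * DIV_ROUND_UP(100, 64) = 2 controller buffer blocks.
	 */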
4898 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4899}
4900
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004901static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004903 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004904 /* ACL tx timeout must be longer than maximum
4905 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004906 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004907 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004908 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004909 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004910}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004911
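/* Packet-based ACL scheduling: each selected channel may send up to its
 * quote of packets, but the inner loop stops early if the head of the
 * queue drops below the priority the round started with.
 */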
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004912static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004913{
4914 unsigned int cnt = hdev->acl_cnt;
4915 struct hci_chan *chan;
4916 struct sk_buff *skb;
4917 int quote;
4918
4919 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004920
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004921 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004922 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004923 u32 priority = (skb_peek(&chan->data_q))->priority;
4924 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004925 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004926 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004927
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004928 /* Stop if priority has changed */
4929 if (skb->priority < priority)
4930 break;
4931
4932 skb = skb_dequeue(&chan->data_q);
4933
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004934 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004935 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004936
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004937 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938 hdev->acl_last_tx = jiffies;
4939
4940 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004941 chan->sent++;
4942 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004943 }
4944 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004945
4946 if (cnt != hdev->acl_cnt)
4947 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004948}
4949
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004950static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004951{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004952 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004953 struct hci_chan *chan;
4954 struct sk_buff *skb;
4955 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004956 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004957
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004958 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004959
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004960 BT_DBG("%s", hdev->name);
4961
4962 if (hdev->dev_type == HCI_AMP)
4963 type = AMP_LINK;
4964 else
4965 type = ACL_LINK;
4966
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004967 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004968 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004969 u32 priority = (skb_peek(&chan->data_q))->priority;
4970 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4971 int blocks;
4972
4973 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004974 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004975
4976 /* Stop if priority has changed */
4977 if (skb->priority < priority)
4978 break;
4979
4980 skb = skb_dequeue(&chan->data_q);
4981
4982 blocks = __get_blocks(hdev, skb);
4983 if (blocks > hdev->block_cnt)
4984 return;
4985
4986 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004987 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004988
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004989 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004990 hdev->acl_last_tx = jiffies;
4991
4992 hdev->block_cnt -= blocks;
4993 quote -= blocks;
4994
4995 chan->sent += blocks;
4996 chan->conn->sent += blocks;
4997 }
4998 }
4999
5000 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005001 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005002}
5003
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005004static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005005{
5006 BT_DBG("%s", hdev->name);
5007
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005008 /* No ACL link over BR/EDR controller */
5009 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5010 return;
5011
5012 /* No AMP link over AMP controller */
5013 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005014 return;
5015
5016 switch (hdev->flow_ctl_mode) {
5017 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5018 hci_sched_acl_pkt(hdev);
5019 break;
5020
5021 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5022 hci_sched_acl_blk(hdev);
5023 break;
5024 }
5025}
5026
Linus Torvalds1da177e2005-04-16 15:20:36 -07005027/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005028static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005029{
5030 struct hci_conn *conn;
5031 struct sk_buff *skb;
5032 int quote;
5033
5034 BT_DBG("%s", hdev->name);
5035
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005036 if (!hci_conn_num(hdev, SCO_LINK))
5037 return;
5038
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5040 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5041 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005042 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043
5044 conn->sent++;
5045 if (conn->sent == ~0)
5046 conn->sent = 0;
5047 }
5048 }
5049}
5050
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005051static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005052{
5053 struct hci_conn *conn;
5054 struct sk_buff *skb;
5055 int quote;
5056
5057 BT_DBG("%s", hdev->name);
5058
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005059 if (!hci_conn_num(hdev, ESCO_LINK))
5060 return;
5061
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005062 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5063 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005064 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5065 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005066 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005067
5068 conn->sent++;
5069 if (conn->sent == ~0)
5070 conn->sent = 0;
5071 }
5072 }
5073}
5074
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005075static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005076{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005077 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005078 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005079 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005080
5081 BT_DBG("%s", hdev->name);
5082
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005083 if (!hci_conn_num(hdev, LE_LINK))
5084 return;
5085
Marcel Holtmann4a964402014-07-02 19:10:33 +02005086 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005087 /* LE tx timeout must be longer than maximum
5088 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005089 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005090 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005091 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005092 }
5093
5094 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005095 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005096 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005097 u32 priority = (skb_peek(&chan->data_q))->priority;
5098 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005099 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005100 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005101
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005102 /* Stop if priority has changed */
5103 if (skb->priority < priority)
5104 break;
5105
5106 skb = skb_dequeue(&chan->data_q);
5107
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005108 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005109 hdev->le_last_tx = jiffies;
5110
5111 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005112 chan->sent++;
5113 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005114 }
5115 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005116
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005117 if (hdev->le_pkts)
5118 hdev->le_cnt = cnt;
5119 else
5120 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005121
5122 if (cnt != tmp)
5123 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005124}
5125
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005126static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005128 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005129 struct sk_buff *skb;
5130
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005131 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005132 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133
Marcel Holtmann52de5992013-09-03 18:08:38 -07005134 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5135 /* Schedule queues and send stuff to HCI driver */
5136 hci_sched_acl(hdev);
5137 hci_sched_sco(hdev);
5138 hci_sched_esco(hdev);
5139 hci_sched_le(hdev);
5140 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005141
Linus Torvalds1da177e2005-04-16 15:20:36 -07005142 /* Send next queued raw (unknown type) packet */
5143 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005144 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145}
5146
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005147/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148
5149/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005150static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151{
5152 struct hci_acl_hdr *hdr = (void *) skb->data;
5153 struct hci_conn *conn;
5154 __u16 handle, flags;
5155
5156 skb_pull(skb, HCI_ACL_HDR_SIZE);
5157
5158 handle = __le16_to_cpu(hdr->handle);
5159 flags = hci_flags(handle);
5160 handle = hci_handle(handle);
5161
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005162 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005163 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164
5165 hdev->stat.acl_rx++;
5166
5167 hci_dev_lock(hdev);
5168 conn = hci_conn_hash_lookup_handle(hdev, handle);
5169 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005170
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005172 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005173
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005175 l2cap_recv_acldata(conn, skb, flags);
5176 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005178 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005179 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180 }
5181
5182 kfree_skb(skb);
5183}
5184
5185/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005186static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005187{
5188 struct hci_sco_hdr *hdr = (void *) skb->data;
5189 struct hci_conn *conn;
5190 __u16 handle;
5191
5192 skb_pull(skb, HCI_SCO_HDR_SIZE);
5193
5194 handle = __le16_to_cpu(hdr->handle);
5195
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005196 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005197
5198 hdev->stat.sco_rx++;
5199
5200 hci_dev_lock(hdev);
5201 conn = hci_conn_hash_lookup_handle(hdev, handle);
5202 hci_dev_unlock(hdev);
5203
5204 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005205 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005206 sco_recv_scodata(conn, skb);
5207 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005209 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005210 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211 }
5212
5213 kfree_skb(skb);
5214}
5215
Johan Hedberg9238f362013-03-05 20:37:48 +02005216static bool hci_req_is_complete(struct hci_dev *hdev)
5217{
5218 struct sk_buff *skb;
5219
5220 skb = skb_peek(&hdev->cmd_q);
5221 if (!skb)
5222 return true;
5223
5224 return bt_cb(skb)->req.start;
5225}
5226
Johan Hedberg42c6b122013-03-05 20:37:49 +02005227static void hci_resend_last(struct hci_dev *hdev)
5228{
5229 struct hci_command_hdr *sent;
5230 struct sk_buff *skb;
5231 u16 opcode;
5232
5233 if (!hdev->sent_cmd)
5234 return;
5235
5236 sent = (void *) hdev->sent_cmd->data;
5237 opcode = __le16_to_cpu(sent->opcode);
5238 if (opcode == HCI_OP_RESET)
5239 return;
5240
5241 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5242 if (!skb)
5243 return;
5244
5245 skb_queue_head(&hdev->cmd_q, skb);
5246 queue_work(hdev->workqueue, &hdev->cmd_work);
5247}
5248
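/* Decide whether the command that just completed also completes a
 * request. A request is a run of commands whose first skb carries the
 * req.start marker and whose completion callback rides on the last
 * command sent; once the request ends (or any command in it fails) the
 * rest of its queued commands are flushed from cmd_q.
 */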
Johan Hedberg9238f362013-03-05 20:37:48 +02005249void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5250{
5251 hci_req_complete_t req_complete = NULL;
5252 struct sk_buff *skb;
5253 unsigned long flags;
5254
5255 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5256
Johan Hedberg42c6b122013-03-05 20:37:49 +02005257 /* If the completed command doesn't match the last one that was
5258 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005259 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005260 if (!hci_sent_cmd_data(hdev, opcode)) {
5261 /* Some CSR based controllers generate a spontaneous
5262 * reset complete event during init and any pending
5263 * command will never be completed. In such a case we
5264 * need to resend whatever was the last sent
5265 * command.
5266 */
5267 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5268 hci_resend_last(hdev);
5269
Johan Hedberg9238f362013-03-05 20:37:48 +02005270 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005271 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005272
5273 /* If the command succeeded and there's still more commands in
5274 * this request the request is not yet complete.
5275 */
5276 if (!status && !hci_req_is_complete(hdev))
5277 return;
5278
5279 /* If this was the last command in a request the complete
5280 * callback would be found in hdev->sent_cmd instead of the
5281 * command queue (hdev->cmd_q).
5282 */
5283 if (hdev->sent_cmd) {
5284 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005285
5286 if (req_complete) {
5287 /* We must set the complete callback to NULL to
5288 * avoid calling the callback more than once if
5289 * this function gets called again.
5290 */
5291 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5292
Johan Hedberg9238f362013-03-05 20:37:48 +02005293 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005294 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005295 }
5296
5297 /* Remove all pending commands belonging to this request */
5298 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5299 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5300 if (bt_cb(skb)->req.start) {
5301 __skb_queue_head(&hdev->cmd_q, skb);
5302 break;
5303 }
5304
5305 req_complete = bt_cb(skb)->req.complete;
5306 kfree_skb(skb);
5307 }
5308 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5309
5310call_complete:
5311 if (req_complete)
5312 req_complete(hdev, status);
5313}
5314
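/* RX work: every dequeued frame is first mirrored to the monitor (and
 * to raw sockets in promiscuous mode); frames are then dropped entirely
 * while the device is bound to a user channel, data packets are dropped
 * while HCI_INIT is set, and everything else is dispatched by type.
 */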
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005315static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005317 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005318 struct sk_buff *skb;
5319
5320 BT_DBG("%s", hdev->name);
5321
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005323 /* Send copy to monitor */
5324 hci_send_to_monitor(hdev, skb);
5325
Linus Torvalds1da177e2005-04-16 15:20:36 -07005326 if (atomic_read(&hdev->promisc)) {
5327 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005328 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005329 }
5330
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005331 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005332 kfree_skb(skb);
5333 continue;
5334 }
5335
5336 if (test_bit(HCI_INIT, &hdev->flags)) {
5337			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005338 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005339 case HCI_ACLDATA_PKT:
5340 case HCI_SCODATA_PKT:
5341 kfree_skb(skb);
5342 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005344 }
5345
5346 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005347 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005348 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005349 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005350 hci_event_packet(hdev, skb);
5351 break;
5352
5353 case HCI_ACLDATA_PKT:
5354 BT_DBG("%s ACL data packet", hdev->name);
5355 hci_acldata_packet(hdev, skb);
5356 break;
5357
5358 case HCI_SCODATA_PKT:
5359 BT_DBG("%s SCO data packet", hdev->name);
5360 hci_scodata_packet(hdev, skb);
5361 break;
5362
5363 default:
5364 kfree_skb(skb);
5365 break;
5366 }
5367 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005368}
5369
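/* Command TX work: cmd_cnt tracks how many commands the controller will
 * accept right now (typically one), a clone of the command just sent is
 * kept in hdev->sent_cmd for the completion path, and the command timer
 * guards against a controller that never responds.
 */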
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005370static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005371{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005372 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373 struct sk_buff *skb;
5374
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005375 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5376 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005377
Linus Torvalds1da177e2005-04-16 15:20:36 -07005378 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005379 if (atomic_read(&hdev->cmd_cnt)) {
5380 skb = skb_dequeue(&hdev->cmd_q);
5381 if (!skb)
5382 return;
5383
Wei Yongjun7585b972009-02-25 18:29:52 +08005384 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005386 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005387 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005388 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005389 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005390 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005391 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005392 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005393 schedule_delayed_work(&hdev->cmd_timer,
5394 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005395 } else {
5396 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005397 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005398 }
5399 }
5400}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005401
5402void hci_req_add_le_scan_disable(struct hci_request *req)
5403{
5404 struct hci_cp_le_set_scan_enable cp;
5405
5406 memset(&cp, 0, sizeof(cp));
5407 cp.enable = LE_SCAN_DISABLE;
5408 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5409}
Andre Guedesa4790db2014-02-26 20:21:47 -03005410
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005411static void add_to_white_list(struct hci_request *req,
5412 struct hci_conn_params *params)
5413{
5414 struct hci_cp_le_add_to_white_list cp;
5415
5416 cp.bdaddr_type = params->addr_type;
5417 bacpy(&cp.bdaddr, &params->addr);
5418
5419 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5420}
5421
5422static u8 update_white_list(struct hci_request *req)
5423{
5424 struct hci_dev *hdev = req->hdev;
5425 struct hci_conn_params *params;
5426 struct bdaddr_list *b;
5427 uint8_t white_list_entries = 0;
5428
5429 /* Go through the current white list programmed into the
5430 * controller one by one and check if that address is still
5431 * in the list of pending connections or list of devices to
5432 * report. If not present in either list, then queue the
5433 * command to remove it from the controller.
5434 */
5435 list_for_each_entry(b, &hdev->le_white_list, list) {
5436 struct hci_cp_le_del_from_white_list cp;
5437
5438 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5439 &b->bdaddr, b->bdaddr_type) ||
5440 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5441 &b->bdaddr, b->bdaddr_type)) {
5442 white_list_entries++;
5443 continue;
5444 }
5445
5446 cp.bdaddr_type = b->bdaddr_type;
5447 bacpy(&cp.bdaddr, &b->bdaddr);
5448
5449 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5450 sizeof(cp), &cp);
5451 }
5452
5453 /* Since all no longer valid white list entries have been
5454 * removed, walk through the list of pending connections
5455 * and ensure that any new device gets programmed into
5456 * the controller.
5457 *
5458 * If the list of the devices is larger than the list of
5459 * available white list entries in the controller, then
5460	 * just abort and return the filter policy value to not use the
5461 * white list.
5462 */
5463 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5464 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5465 &params->addr, params->addr_type))
5466 continue;
5467
5468 if (white_list_entries >= hdev->le_white_list_size) {
5469 /* Select filter policy to accept all advertising */
5470 return 0x00;
5471 }
5472
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005473 if (hci_find_irk_by_addr(hdev, &params->addr,
5474 params->addr_type)) {
5475			/* White list cannot be used with RPAs */
5476 return 0x00;
5477 }
5478
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005479 white_list_entries++;
5480 add_to_white_list(req, params);
5481 }

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}
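
/* A short worked example of the capacity check above (assuming, as an
 * illustration, that hdev->le_white_list_size was filled in during
 * controller init from the LE Read White List Size command): with a
 * controller reporting a white list size of 8, the ninth distinct
 * device found across pend_le_conns and pend_le_reports trips the
 * "white_list_entries >= hdev->le_white_list_size" test, and the
 * function returns 0x00, i.e. scanning proceeds without the white
 * list filter rather than with a truncated one.
 */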

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}
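
/* For scale, both le_scan_interval and le_scan_window are expressed in
 * units of 0.625 ms, per the LE Set Scan Parameters command. Assuming
 * the defaults this file assigns at device allocation time (interval
 * 0x0060, window 0x0030), the controller scans for 30 ms out of every
 * 60 ms:
 *
 *	0x0060 * 0.625 ms = 60 ms interval
 *	0x0030 * 0.625 ms = 30 ms window
 */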

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}
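
/* Sketch of how this completion callback is wired up (the same pattern
 * appears at the end of hci_update_background_scan() below): hci_req_run()
 * invokes it with the HCI status of the request, where 0x00 means success,
 * so the handler above only logs failures.
 *
 *	hci_req_init(&req, hdev);
 *	...
 *	err = hci_req_run(&req, update_background_scan_complete);
 */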

/* This function controls the background scanning based on the
 * hdev->pend_le_conns and hdev->pend_le_reports lists. If there are
 * pending LE connections or devices to report, we start the background
 * scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection or
		 * device to report, we should keep the background
		 * scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to the duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
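
/* Example call site (illustrative, not taken from this file): since this
 * function requires hdev->lock, callers are expected to wrap the update
 * in the standard lock helpers, e.g.
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */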