blob: 94551c33c4c6a3a57fc6af66da8b3e9717372aa8 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
Johan Hedberg970c4e42014-02-18 10:19:33 +020039#include "smp.h"
40
Marcel Holtmannb78752c2010-08-08 23:06:53 -040041static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020042static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020043static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
Linus Torvalds1da177e2005-04-16 15:20:36 -070045/* HCI device list */
46LIST_HEAD(hci_dev_list);
47DEFINE_RWLOCK(hci_dev_list_lock);
48
49/* HCI callback list */
50LIST_HEAD(hci_cb_list);
51DEFINE_RWLOCK(hci_cb_list_lock);
52
Sasha Levin3df92b32012-05-27 22:36:56 +020053/* HCI ID Numbering */
54static DEFINE_IDA(hci_index_ida);
55
Linus Torvalds1da177e2005-04-16 15:20:36 -070056/* ---- HCI notifications ---- */
57
/* Forward a device-level event to the HCI socket layer so that
 * monitoring sockets are notified of the state change.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
62
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070063/* ---- HCI debugfs entries ---- */
64
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070065static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
67{
68 struct hci_dev *hdev = file->private_data;
69 char buf[3];
70
Marcel Holtmann111902f2014-06-21 04:53:17 +020071 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070072 buf[1] = '\n';
73 buf[2] = '\0';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
75}
76
77static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 struct sk_buff *skb;
82 char buf[32];
83 size_t buf_size = min(count, (sizeof(buf)-1));
84 bool enable;
85 int err;
86
87 if (!test_bit(HCI_UP, &hdev->flags))
88 return -ENETDOWN;
89
90 if (copy_from_user(buf, user_buf, buf_size))
91 return -EFAULT;
92
93 buf[buf_size] = '\0';
94 if (strtobool(buf, &enable))
95 return -EINVAL;
96
Marcel Holtmann111902f2014-06-21 04:53:17 +020097 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070098 return -EALREADY;
99
100 hci_req_lock(hdev);
101 if (enable)
102 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
103 HCI_CMD_TIMEOUT);
104 else
105 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
106 HCI_CMD_TIMEOUT);
107 hci_req_unlock(hdev);
108
109 if (IS_ERR(skb))
110 return PTR_ERR(skb);
111
112 err = -bt_to_errno(skb->data[0]);
113 kfree_skb(skb);
114
115 if (err < 0)
116 return err;
117
Marcel Holtmann111902f2014-06-21 04:53:17 +0200118 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700119
120 return count;
121}
122
123static const struct file_operations dut_mode_fops = {
124 .open = simple_open,
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
128};
129
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700130static int features_show(struct seq_file *f, void *ptr)
131{
132 struct hci_dev *hdev = f->private;
133 u8 p;
134
135 hci_dev_lock(hdev);
136 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700137 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700138 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
139 hdev->features[p][0], hdev->features[p][1],
140 hdev->features[p][2], hdev->features[p][3],
141 hdev->features[p][4], hdev->features[p][5],
142 hdev->features[p][6], hdev->features[p][7]);
143 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700144 if (lmp_le_capable(hdev))
145 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
146 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
147 hdev->le_features[0], hdev->le_features[1],
148 hdev->le_features[2], hdev->le_features[3],
149 hdev->le_features[4], hdev->le_features[5],
150 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700151 hci_dev_unlock(hdev);
152
153 return 0;
154}
155
156static int features_open(struct inode *inode, struct file *file)
157{
158 return single_open(file, features_show, inode->i_private);
159}
160
161static const struct file_operations features_fops = {
162 .open = features_open,
163 .read = seq_read,
164 .llseek = seq_lseek,
165 .release = single_release,
166};
167
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700168static int blacklist_show(struct seq_file *f, void *p)
169{
170 struct hci_dev *hdev = f->private;
171 struct bdaddr_list *b;
172
173 hci_dev_lock(hdev);
174 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700175 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700176 hci_dev_unlock(hdev);
177
178 return 0;
179}
180
181static int blacklist_open(struct inode *inode, struct file *file)
182{
183 return single_open(file, blacklist_show, inode->i_private);
184}
185
186static const struct file_operations blacklist_fops = {
187 .open = blacklist_open,
188 .read = seq_read,
189 .llseek = seq_lseek,
190 .release = single_release,
191};
192
Marcel Holtmann47219832013-10-17 17:24:15 -0700193static int uuids_show(struct seq_file *f, void *p)
194{
195 struct hci_dev *hdev = f->private;
196 struct bt_uuid *uuid;
197
198 hci_dev_lock(hdev);
199 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700200 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700201
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700202 /* The Bluetooth UUID values are stored in big endian,
203 * but with reversed byte order. So convert them into
204 * the right order for the %pUb modifier.
205 */
206 for (i = 0; i < 16; i++)
207 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700208
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700209 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700210 }
211 hci_dev_unlock(hdev);
212
213 return 0;
214}
215
216static int uuids_open(struct inode *inode, struct file *file)
217{
218 return single_open(file, uuids_show, inode->i_private);
219}
220
221static const struct file_operations uuids_fops = {
222 .open = uuids_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
226};
227
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700228static int inquiry_cache_show(struct seq_file *f, void *p)
229{
230 struct hci_dev *hdev = f->private;
231 struct discovery_state *cache = &hdev->discovery;
232 struct inquiry_entry *e;
233
234 hci_dev_lock(hdev);
235
236 list_for_each_entry(e, &cache->all, all) {
237 struct inquiry_data *data = &e->data;
238 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 &data->bdaddr,
240 data->pscan_rep_mode, data->pscan_period_mode,
241 data->pscan_mode, data->dev_class[2],
242 data->dev_class[1], data->dev_class[0],
243 __le16_to_cpu(data->clock_offset),
244 data->rssi, data->ssp_mode, e->timestamp);
245 }
246
247 hci_dev_unlock(hdev);
248
249 return 0;
250}
251
252static int inquiry_cache_open(struct inode *inode, struct file *file)
253{
254 return single_open(file, inquiry_cache_show, inode->i_private);
255}
256
257static const struct file_operations inquiry_cache_fops = {
258 .open = inquiry_cache_open,
259 .read = seq_read,
260 .llseek = seq_lseek,
261 .release = single_release,
262};
263
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700264static int link_keys_show(struct seq_file *f, void *ptr)
265{
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
268
269 hci_dev_lock(hdev);
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 }
275 hci_dev_unlock(hdev);
276
277 return 0;
278}
279
280static int link_keys_open(struct inode *inode, struct file *file)
281{
282 return single_open(file, link_keys_show, inode->i_private);
283}
284
285static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
287 .read = seq_read,
288 .llseek = seq_lseek,
289 .release = single_release,
290};
291
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700292static int dev_class_show(struct seq_file *f, void *ptr)
293{
294 struct hci_dev *hdev = f->private;
295
296 hci_dev_lock(hdev);
297 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
298 hdev->dev_class[1], hdev->dev_class[0]);
299 hci_dev_unlock(hdev);
300
301 return 0;
302}
303
304static int dev_class_open(struct inode *inode, struct file *file)
305{
306 return single_open(file, dev_class_show, inode->i_private);
307}
308
309static const struct file_operations dev_class_fops = {
310 .open = dev_class_open,
311 .read = seq_read,
312 .llseek = seq_lseek,
313 .release = single_release,
314};
315
Marcel Holtmann041000b2013-10-17 12:02:31 -0700316static int voice_setting_get(void *data, u64 *val)
317{
318 struct hci_dev *hdev = data;
319
320 hci_dev_lock(hdev);
321 *val = hdev->voice_setting;
322 hci_dev_unlock(hdev);
323
324 return 0;
325}
326
327DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
328 NULL, "0x%4.4llx\n");
329
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700330static int auto_accept_delay_set(void *data, u64 val)
331{
332 struct hci_dev *hdev = data;
333
334 hci_dev_lock(hdev);
335 hdev->auto_accept_delay = val;
336 hci_dev_unlock(hdev);
337
338 return 0;
339}
340
341static int auto_accept_delay_get(void *data, u64 *val)
342{
343 struct hci_dev *hdev = data;
344
345 hci_dev_lock(hdev);
346 *val = hdev->auto_accept_delay;
347 hci_dev_unlock(hdev);
348
349 return 0;
350}
351
352DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n");
354
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800355static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
356 size_t count, loff_t *ppos)
357{
358 struct hci_dev *hdev = file->private_data;
359 char buf[3];
360
Marcel Holtmann111902f2014-06-21 04:53:17 +0200361 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800362 buf[1] = '\n';
363 buf[2] = '\0';
364 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
365}
366
367static ssize_t force_sc_support_write(struct file *file,
368 const char __user *user_buf,
369 size_t count, loff_t *ppos)
370{
371 struct hci_dev *hdev = file->private_data;
372 char buf[32];
373 size_t buf_size = min(count, (sizeof(buf)-1));
374 bool enable;
375
376 if (test_bit(HCI_UP, &hdev->flags))
377 return -EBUSY;
378
379 if (copy_from_user(buf, user_buf, buf_size))
380 return -EFAULT;
381
382 buf[buf_size] = '\0';
383 if (strtobool(buf, &enable))
384 return -EINVAL;
385
Marcel Holtmann111902f2014-06-21 04:53:17 +0200386 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800387 return -EALREADY;
388
Marcel Holtmann111902f2014-06-21 04:53:17 +0200389 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800390
391 return count;
392}
393
394static const struct file_operations force_sc_support_fops = {
395 .open = simple_open,
396 .read = force_sc_support_read,
397 .write = force_sc_support_write,
398 .llseek = default_llseek,
399};
400
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800401static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
402 size_t count, loff_t *ppos)
403{
404 struct hci_dev *hdev = file->private_data;
405 char buf[3];
406
407 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
408 buf[1] = '\n';
409 buf[2] = '\0';
410 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
411}
412
413static const struct file_operations sc_only_mode_fops = {
414 .open = simple_open,
415 .read = sc_only_mode_read,
416 .llseek = default_llseek,
417};
418
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700419static int idle_timeout_set(void *data, u64 val)
420{
421 struct hci_dev *hdev = data;
422
423 if (val != 0 && (val < 500 || val > 3600000))
424 return -EINVAL;
425
426 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700427 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700428 hci_dev_unlock(hdev);
429
430 return 0;
431}
432
433static int idle_timeout_get(void *data, u64 *val)
434{
435 struct hci_dev *hdev = data;
436
437 hci_dev_lock(hdev);
438 *val = hdev->idle_timeout;
439 hci_dev_unlock(hdev);
440
441 return 0;
442}
443
444DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
445 idle_timeout_set, "%llu\n");
446
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200447static int rpa_timeout_set(void *data, u64 val)
448{
449 struct hci_dev *hdev = data;
450
451 /* Require the RPA timeout to be at least 30 seconds and at most
452 * 24 hours.
453 */
454 if (val < 30 || val > (60 * 60 * 24))
455 return -EINVAL;
456
457 hci_dev_lock(hdev);
458 hdev->rpa_timeout = val;
459 hci_dev_unlock(hdev);
460
461 return 0;
462}
463
464static int rpa_timeout_get(void *data, u64 *val)
465{
466 struct hci_dev *hdev = data;
467
468 hci_dev_lock(hdev);
469 *val = hdev->rpa_timeout;
470 hci_dev_unlock(hdev);
471
472 return 0;
473}
474
475DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
476 rpa_timeout_set, "%llu\n");
477
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700478static int sniff_min_interval_set(void *data, u64 val)
479{
480 struct hci_dev *hdev = data;
481
482 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
483 return -EINVAL;
484
485 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700486 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700487 hci_dev_unlock(hdev);
488
489 return 0;
490}
491
492static int sniff_min_interval_get(void *data, u64 *val)
493{
494 struct hci_dev *hdev = data;
495
496 hci_dev_lock(hdev);
497 *val = hdev->sniff_min_interval;
498 hci_dev_unlock(hdev);
499
500 return 0;
501}
502
503DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
504 sniff_min_interval_set, "%llu\n");
505
506static int sniff_max_interval_set(void *data, u64 val)
507{
508 struct hci_dev *hdev = data;
509
510 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
511 return -EINVAL;
512
513 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700514 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700515 hci_dev_unlock(hdev);
516
517 return 0;
518}
519
520static int sniff_max_interval_get(void *data, u64 *val)
521{
522 struct hci_dev *hdev = data;
523
524 hci_dev_lock(hdev);
525 *val = hdev->sniff_max_interval;
526 hci_dev_unlock(hdev);
527
528 return 0;
529}
530
531DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
532 sniff_max_interval_set, "%llu\n");
533
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200534static int conn_info_min_age_set(void *data, u64 val)
535{
536 struct hci_dev *hdev = data;
537
538 if (val == 0 || val > hdev->conn_info_max_age)
539 return -EINVAL;
540
541 hci_dev_lock(hdev);
542 hdev->conn_info_min_age = val;
543 hci_dev_unlock(hdev);
544
545 return 0;
546}
547
548static int conn_info_min_age_get(void *data, u64 *val)
549{
550 struct hci_dev *hdev = data;
551
552 hci_dev_lock(hdev);
553 *val = hdev->conn_info_min_age;
554 hci_dev_unlock(hdev);
555
556 return 0;
557}
558
559DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
560 conn_info_min_age_set, "%llu\n");
561
562static int conn_info_max_age_set(void *data, u64 val)
563{
564 struct hci_dev *hdev = data;
565
566 if (val == 0 || val < hdev->conn_info_min_age)
567 return -EINVAL;
568
569 hci_dev_lock(hdev);
570 hdev->conn_info_max_age = val;
571 hci_dev_unlock(hdev);
572
573 return 0;
574}
575
576static int conn_info_max_age_get(void *data, u64 *val)
577{
578 struct hci_dev *hdev = data;
579
580 hci_dev_lock(hdev);
581 *val = hdev->conn_info_max_age;
582 hci_dev_unlock(hdev);
583
584 return 0;
585}
586
587DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
588 conn_info_max_age_set, "%llu\n");
589
Marcel Holtmannac345812014-02-23 12:44:25 -0800590static int identity_show(struct seq_file *f, void *p)
591{
592 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200593 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800594 u8 addr_type;
595
596 hci_dev_lock(hdev);
597
Johan Hedberga1f4c312014-02-27 14:05:41 +0200598 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800599
Johan Hedberga1f4c312014-02-27 14:05:41 +0200600 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800601 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800602
603 hci_dev_unlock(hdev);
604
605 return 0;
606}
607
608static int identity_open(struct inode *inode, struct file *file)
609{
610 return single_open(file, identity_show, inode->i_private);
611}
612
613static const struct file_operations identity_fops = {
614 .open = identity_open,
615 .read = seq_read,
616 .llseek = seq_lseek,
617 .release = single_release,
618};
619
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800620static int random_address_show(struct seq_file *f, void *p)
621{
622 struct hci_dev *hdev = f->private;
623
624 hci_dev_lock(hdev);
625 seq_printf(f, "%pMR\n", &hdev->random_addr);
626 hci_dev_unlock(hdev);
627
628 return 0;
629}
630
631static int random_address_open(struct inode *inode, struct file *file)
632{
633 return single_open(file, random_address_show, inode->i_private);
634}
635
636static const struct file_operations random_address_fops = {
637 .open = random_address_open,
638 .read = seq_read,
639 .llseek = seq_lseek,
640 .release = single_release,
641};
642
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700643static int static_address_show(struct seq_file *f, void *p)
644{
645 struct hci_dev *hdev = f->private;
646
647 hci_dev_lock(hdev);
648 seq_printf(f, "%pMR\n", &hdev->static_addr);
649 hci_dev_unlock(hdev);
650
651 return 0;
652}
653
654static int static_address_open(struct inode *inode, struct file *file)
655{
656 return single_open(file, static_address_show, inode->i_private);
657}
658
659static const struct file_operations static_address_fops = {
660 .open = static_address_open,
661 .read = seq_read,
662 .llseek = seq_lseek,
663 .release = single_release,
664};
665
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800666static ssize_t force_static_address_read(struct file *file,
667 char __user *user_buf,
668 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700669{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800670 struct hci_dev *hdev = file->private_data;
671 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700672
Marcel Holtmann111902f2014-06-21 04:53:17 +0200673 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800674 buf[1] = '\n';
675 buf[2] = '\0';
676 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
677}
678
679static ssize_t force_static_address_write(struct file *file,
680 const char __user *user_buf,
681 size_t count, loff_t *ppos)
682{
683 struct hci_dev *hdev = file->private_data;
684 char buf[32];
685 size_t buf_size = min(count, (sizeof(buf)-1));
686 bool enable;
687
688 if (test_bit(HCI_UP, &hdev->flags))
689 return -EBUSY;
690
691 if (copy_from_user(buf, user_buf, buf_size))
692 return -EFAULT;
693
694 buf[buf_size] = '\0';
695 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700696 return -EINVAL;
697
Marcel Holtmann111902f2014-06-21 04:53:17 +0200698 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800699 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700700
Marcel Holtmann111902f2014-06-21 04:53:17 +0200701 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800702
703 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700704}
705
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800706static const struct file_operations force_static_address_fops = {
707 .open = simple_open,
708 .read = force_static_address_read,
709 .write = force_static_address_write,
710 .llseek = default_llseek,
711};
Marcel Holtmann92202182013-10-18 16:38:10 -0700712
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800713static int white_list_show(struct seq_file *f, void *ptr)
714{
715 struct hci_dev *hdev = f->private;
716 struct bdaddr_list *b;
717
718 hci_dev_lock(hdev);
719 list_for_each_entry(b, &hdev->le_white_list, list)
720 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
721 hci_dev_unlock(hdev);
722
723 return 0;
724}
725
726static int white_list_open(struct inode *inode, struct file *file)
727{
728 return single_open(file, white_list_show, inode->i_private);
729}
730
731static const struct file_operations white_list_fops = {
732 .open = white_list_open,
733 .read = seq_read,
734 .llseek = seq_lseek,
735 .release = single_release,
736};
737
Marcel Holtmann3698d702014-02-18 21:54:49 -0800738static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
739{
740 struct hci_dev *hdev = f->private;
741 struct list_head *p, *n;
742
743 hci_dev_lock(hdev);
744 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
745 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
746 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
747 &irk->bdaddr, irk->addr_type,
748 16, irk->val, &irk->rpa);
749 }
750 hci_dev_unlock(hdev);
751
752 return 0;
753}
754
755static int identity_resolving_keys_open(struct inode *inode, struct file *file)
756{
757 return single_open(file, identity_resolving_keys_show,
758 inode->i_private);
759}
760
761static const struct file_operations identity_resolving_keys_fops = {
762 .open = identity_resolving_keys_open,
763 .read = seq_read,
764 .llseek = seq_lseek,
765 .release = single_release,
766};
767
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700768static int long_term_keys_show(struct seq_file *f, void *ptr)
769{
770 struct hci_dev *hdev = f->private;
771 struct list_head *p, *n;
772
773 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800774 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700775 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800776 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700777 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
778 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800779 __le64_to_cpu(ltk->rand), 16, ltk->val);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700780 }
781 hci_dev_unlock(hdev);
782
783 return 0;
784}
785
786static int long_term_keys_open(struct inode *inode, struct file *file)
787{
788 return single_open(file, long_term_keys_show, inode->i_private);
789}
790
791static const struct file_operations long_term_keys_fops = {
792 .open = long_term_keys_open,
793 .read = seq_read,
794 .llseek = seq_lseek,
795 .release = single_release,
796};
797
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700798static int conn_min_interval_set(void *data, u64 val)
799{
800 struct hci_dev *hdev = data;
801
802 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
803 return -EINVAL;
804
805 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700806 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700807 hci_dev_unlock(hdev);
808
809 return 0;
810}
811
812static int conn_min_interval_get(void *data, u64 *val)
813{
814 struct hci_dev *hdev = data;
815
816 hci_dev_lock(hdev);
817 *val = hdev->le_conn_min_interval;
818 hci_dev_unlock(hdev);
819
820 return 0;
821}
822
823DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
824 conn_min_interval_set, "%llu\n");
825
826static int conn_max_interval_set(void *data, u64 val)
827{
828 struct hci_dev *hdev = data;
829
830 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
831 return -EINVAL;
832
833 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700834 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700835 hci_dev_unlock(hdev);
836
837 return 0;
838}
839
840static int conn_max_interval_get(void *data, u64 *val)
841{
842 struct hci_dev *hdev = data;
843
844 hci_dev_lock(hdev);
845 *val = hdev->le_conn_max_interval;
846 hci_dev_unlock(hdev);
847
848 return 0;
849}
850
851DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
852 conn_max_interval_set, "%llu\n");
853
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800854static int adv_channel_map_set(void *data, u64 val)
855{
856 struct hci_dev *hdev = data;
857
858 if (val < 0x01 || val > 0x07)
859 return -EINVAL;
860
861 hci_dev_lock(hdev);
862 hdev->le_adv_channel_map = val;
863 hci_dev_unlock(hdev);
864
865 return 0;
866}
867
868static int adv_channel_map_get(void *data, u64 *val)
869{
870 struct hci_dev *hdev = data;
871
872 hci_dev_lock(hdev);
873 *val = hdev->le_adv_channel_map;
874 hci_dev_unlock(hdev);
875
876 return 0;
877}
878
879DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
880 adv_channel_map_set, "%llu\n");
881
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200882static int device_list_show(struct seq_file *f, void *ptr)
Andre Guedes7d474e02014-02-26 20:21:54 -0300883{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200884 struct hci_dev *hdev = f->private;
Andre Guedes7d474e02014-02-26 20:21:54 -0300885 struct hci_conn_params *p;
886
887 hci_dev_lock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300888 list_for_each_entry(p, &hdev->le_conn_params, list) {
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200889 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
Andre Guedes7d474e02014-02-26 20:21:54 -0300890 p->auto_connect);
891 }
Andre Guedes7d474e02014-02-26 20:21:54 -0300892 hci_dev_unlock(hdev);
893
894 return 0;
895}
896
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200897static int device_list_open(struct inode *inode, struct file *file)
Andre Guedes7d474e02014-02-26 20:21:54 -0300898{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200899 return single_open(file, device_list_show, inode->i_private);
Andre Guedes7d474e02014-02-26 20:21:54 -0300900}
901
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200902static const struct file_operations device_list_fops = {
903 .open = device_list_open,
Andre Guedes7d474e02014-02-26 20:21:54 -0300904 .read = seq_read,
Andre Guedes7d474e02014-02-26 20:21:54 -0300905 .llseek = seq_lseek,
906 .release = single_release,
907};
908
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909/* ---- HCI requests ---- */
910
Johan Hedberg42c6b122013-03-05 20:37:49 +0200911static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200913 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914
915 if (hdev->req_status == HCI_REQ_PEND) {
916 hdev->req_result = result;
917 hdev->req_status = HCI_REQ_DONE;
918 wake_up_interruptible(&hdev->req_wait_q);
919 }
920}
921
922static void hci_req_cancel(struct hci_dev *hdev, int err)
923{
924 BT_DBG("%s err 0x%2.2x", hdev->name, err);
925
926 if (hdev->req_status == HCI_REQ_PEND) {
927 hdev->req_result = err;
928 hdev->req_status = HCI_REQ_CANCELED;
929 wake_up_interruptible(&hdev->req_wait_q);
930 }
931}
932
Fengguang Wu77a63e02013-04-20 16:24:31 +0300933static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
934 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300935{
936 struct hci_ev_cmd_complete *ev;
937 struct hci_event_hdr *hdr;
938 struct sk_buff *skb;
939
940 hci_dev_lock(hdev);
941
942 skb = hdev->recv_evt;
943 hdev->recv_evt = NULL;
944
945 hci_dev_unlock(hdev);
946
947 if (!skb)
948 return ERR_PTR(-ENODATA);
949
950 if (skb->len < sizeof(*hdr)) {
951 BT_ERR("Too short HCI event");
952 goto failed;
953 }
954
955 hdr = (void *) skb->data;
956 skb_pull(skb, HCI_EVENT_HDR_SIZE);
957
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300958 if (event) {
959 if (hdr->evt != event)
960 goto failed;
961 return skb;
962 }
963
Johan Hedberg75e84b72013-04-02 13:35:04 +0300964 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
965 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
966 goto failed;
967 }
968
969 if (skb->len < sizeof(*ev)) {
970 BT_ERR("Too short cmd_complete event");
971 goto failed;
972 }
973
974 ev = (void *) skb->data;
975 skb_pull(skb, sizeof(*ev));
976
977 if (opcode == __le16_to_cpu(ev->opcode))
978 return skb;
979
980 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
981 __le16_to_cpu(ev->opcode));
982
983failed:
984 kfree_skb(skb);
985 return ERR_PTR(-ENODATA);
986}
987
/* Send a single HCI command and wait synchronously for its result.
 *
 * @hdev:    controller to send the command to
 * @opcode:  HCI command opcode
 * @plen:    length of @param in bytes
 * @event:   if non-zero, wait for this specific event code instead of
 *           the Command Complete event
 * @timeout: maximum time to wait, in jiffies
 *
 * Returns the skb carrying the matching event on success (caller must
 * free it), or an ERR_PTR: -EINTR when interrupted by a signal,
 * -ETIMEDOUT when no completion arrived in time, or the error from
 * running the request.
 *
 * NOTE(review): completion is signalled via hdev->req_status/req_result
 * by hci_req_sync_complete(); the wait queue is joined only after
 * hci_req_run(), so a completion that fires in between is caught by the
 * req_status check after the (then full-length) timeout sleep.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Must be set before the request runs; the completion handler
	 * transitions it to HCI_REQ_DONE or HCI_REQ_CANCELED.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Map the controller status code to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: nothing completed the request */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	/* On success hand back the event stashed by the RX path */
	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1041
1042struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03001043 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001044{
1045 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +03001046}
1047EXPORT_SYMBOL(__hci_cmd_sync);
1048
/* Execute request and wait for completion.
 *
 * @func builds the request (queues zero or more HCI commands); the
 * request is then run and the caller sleeps interruptibly for up to
 * @timeout jiffies until hci_req_sync_complete() reports the outcome
 * through hdev->req_status/req_result.
 *
 * Callers must hold the request lock; hci_req_sync() is the locked
 * entry point.  Returns 0 on success or a negative errno (-EINTR on
 * signal, -ETIMEDOUT when no completion arrived).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue the HCI commands making up the request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Map the controller status code to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the request never completed */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1112
Johan Hedberg01178cd2013-03-05 20:37:41 +02001113static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001114 void (*req)(struct hci_request *req,
1115 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001116 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001117{
1118 int ret;
1119
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001120 if (!test_bit(HCI_UP, &hdev->flags))
1121 return -ENETDOWN;
1122
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123 /* Serialize all requests */
1124 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001125 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 hci_req_unlock(hdev);
1127
1128 return ret;
1129}
1130
Johan Hedberg42c6b122013-03-05 20:37:49 +02001131static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001133 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134
1135 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001136 set_bit(HCI_RESET, &req->hdev->flags);
1137 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138}
1139
Johan Hedberg42c6b122013-03-05 20:37:49 +02001140static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001142 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001143
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001145 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001147 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001148 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001149
1150 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001151 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152}
1153
Johan Hedberg42c6b122013-03-05 20:37:49 +02001154static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001155{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001156 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001157
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001158 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001159 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001160
Marcel Holtmannf6996cf2013-10-07 02:31:39 -07001161 /* Read Local Supported Commands */
1162 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1163
1164 /* Read Local Supported Features */
1165 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1166
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001167 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001168 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +03001169
1170 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001171 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001172
Marcel Holtmannf38ba942013-10-07 03:55:53 -07001173 /* Read Flow Control Mode */
1174 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1175
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001176 /* Read Location Data */
1177 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001178}
1179
Johan Hedberg42c6b122013-03-05 20:37:49 +02001180static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001181{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001182 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001183
1184 BT_DBG("%s %ld", hdev->name, opt);
1185
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001186 /* Reset */
1187 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001188 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001189
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001190 switch (hdev->dev_type) {
1191 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001192 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001193 break;
1194
1195 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001196 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001197 break;
1198
1199 default:
1200 BT_ERR("Unknown device type %d", hdev->dev_type);
1201 break;
1202 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001203}
1204
Johan Hedberg42c6b122013-03-05 20:37:49 +02001205static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001206{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001207 struct hci_dev *hdev = req->hdev;
1208
Johan Hedberg2177bab2013-03-05 20:37:43 +02001209 __le16 param;
1210 __u8 flt_type;
1211
1212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001213 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001214
1215 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001216 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001217
1218 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001219 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001220
1221 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001222 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001223
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07001224 /* Read Number of Supported IAC */
1225 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1226
Marcel Holtmann4b836f32013-10-14 14:06:36 -07001227 /* Read Current IAC LAP */
1228 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1229
Johan Hedberg2177bab2013-03-05 20:37:43 +02001230 /* Clear Event Filters */
1231 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001232 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001233
1234 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001235 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001236 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001237
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001238 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1239 * but it does not support page scan related HCI commands.
1240 */
1241 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -05001242 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1243 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1244 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001245}
1246
Johan Hedberg42c6b122013-03-05 20:37:49 +02001247static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001248{
Johan Hedbergc73eee92013-04-19 18:35:21 +03001249 struct hci_dev *hdev = req->hdev;
1250
Johan Hedberg2177bab2013-03-05 20:37:43 +02001251 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001252 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001253
1254 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001255 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001256
Marcel Holtmann747d3f02014-02-27 20:37:29 -08001257 /* Read LE Supported States */
1258 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1259
Johan Hedberg2177bab2013-03-05 20:37:43 +02001260 /* Read LE Advertising Channel TX Power */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001261 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001262
1263 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001264 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001265
Marcel Holtmann747d3f02014-02-27 20:37:29 -08001266 /* Clear LE White List */
1267 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +03001268
1269 /* LE-only controllers have LE implicitly enabled */
1270 if (!lmp_bredr_capable(hdev))
1271 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001272}
1273
1274static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1275{
1276 if (lmp_ext_inq_capable(hdev))
1277 return 0x02;
1278
1279 if (lmp_inq_rssi_capable(hdev))
1280 return 0x01;
1281
1282 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1283 hdev->lmp_subver == 0x0757)
1284 return 0x01;
1285
1286 if (hdev->manufacturer == 15) {
1287 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1288 return 0x01;
1289 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1290 return 0x01;
1291 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1292 return 0x01;
1293 }
1294
1295 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1296 hdev->lmp_subver == 0x1805)
1297 return 0x01;
1298
1299 return 0x00;
1300}
1301
Johan Hedberg42c6b122013-03-05 20:37:49 +02001302static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001303{
1304 u8 mode;
1305
Johan Hedberg42c6b122013-03-05 20:37:49 +02001306 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001307
Johan Hedberg42c6b122013-03-05 20:37:49 +02001308 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001309}
1310
/* Build and queue the Set Event Mask command(s) for @req based on the
 * controller's capabilities.  Page 1 of the mask is always sent (for
 * post-1.1 controllers); LE capable controllers additionally get an
 * LE Set Event Mask command.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	/* The remaining bits are enabled per supported LMP feature */
	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		/* Enable the first five LE events (mask 0x1f) */
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1391
/* Second init stage: per-transport setup (BR/EDR and/or LE), event
 * mask configuration, and SSP / inquiry-mode related reads and writes.
 * The command order here is deliberate; later stages depend on the
 * results of these reads.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		/* Read the extended features page 1 */
		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1455
Johan Hedberg42c6b122013-03-05 20:37:49 +02001456static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001457{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001458 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001459 struct hci_cp_write_def_link_policy cp;
1460 u16 link_policy = 0;
1461
1462 if (lmp_rswitch_capable(hdev))
1463 link_policy |= HCI_LP_RSWITCH;
1464 if (lmp_hold_capable(hdev))
1465 link_policy |= HCI_LP_HOLD;
1466 if (lmp_sniff_capable(hdev))
1467 link_policy |= HCI_LP_SNIFF;
1468 if (lmp_park_capable(hdev))
1469 link_policy |= HCI_LP_PARK;
1470
1471 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001472 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001473}
1474
Johan Hedberg42c6b122013-03-05 20:37:49 +02001475static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001476{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001477 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001478 struct hci_cp_write_le_host_supported cp;
1479
Johan Hedbergc73eee92013-04-19 18:35:21 +03001480 /* LE-only devices do not support explicit enablement */
1481 if (!lmp_bredr_capable(hdev))
1482 return;
1483
Johan Hedberg2177bab2013-03-05 20:37:43 +02001484 memset(&cp, 0, sizeof(cp));
1485
1486 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1487 cp.le = 0x01;
1488 cp.simul = lmp_le_br_capable(hdev);
1489 }
1490
1491 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001492 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1493 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001494}
1495
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001496static void hci_set_event_mask_page_2(struct hci_request *req)
1497{
1498 struct hci_dev *hdev = req->hdev;
1499 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1500
1501 /* If Connectionless Slave Broadcast master role is supported
1502 * enable all necessary events for it.
1503 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001504 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001505 events[1] |= 0x40; /* Triggered Clock Capture */
1506 events[1] |= 0x80; /* Synchronization Train Complete */
1507 events[2] |= 0x10; /* Slave Page Response Timeout */
1508 events[2] |= 0x20; /* CSB Channel Map Change */
1509 }
1510
1511 /* If Connectionless Slave Broadcast slave role is supported
1512 * enable all necessary events for it.
1513 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001514 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001515 events[2] |= 0x01; /* Synchronization Train Received */
1516 events[2] |= 0x02; /* CSB Receive */
1517 events[2] |= 0x04; /* CSB Timeout */
1518 events[2] |= 0x08; /* Truncated Page Complete */
1519 }
1520
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001521 /* Enable Authenticated Payload Timeout Expired event if supported */
1522 if (lmp_ping_capable(hdev))
1523 events[2] |= 0x80;
1524
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001525 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1526}
1527
/* Third init stage: stored-link-key cleanup, default link policy,
 * host LE support, and reads of the extended feature pages beyond
 * page 1 (their count is known from the stage-two reads).
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Wipe every stored key, not just one address */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1571
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001572static void hci_init4_req(struct hci_request *req, unsigned long opt)
1573{
1574 struct hci_dev *hdev = req->hdev;
1575
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001576 /* Set event mask page 2 if the HCI command for it is supported */
1577 if (hdev->commands[22] & 0x04)
1578 hci_set_event_mask_page_2(req);
1579
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001580 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001581 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001582 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001583
1584 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001585 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001586 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001587 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1588 u8 support = 0x01;
1589 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1590 sizeof(support), &support);
1591 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001592}
1593
Johan Hedberg2177bab2013-03-05 20:37:43 +02001594static int __hci_init(struct hci_dev *hdev)
1595{
1596 int err;
1597
1598 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1599 if (err < 0)
1600 return err;
1601
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001602 /* The Device Under Test (DUT) mode is special and available for
1603 * all controller types. So just create it early on.
1604 */
1605 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1606 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1607 &dut_mode_fops);
1608 }
1609
Johan Hedberg2177bab2013-03-05 20:37:43 +02001610 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1611 * BR/EDR/LE type controllers. AMP controllers only need the
1612 * first stage init.
1613 */
1614 if (hdev->dev_type != HCI_BREDR)
1615 return 0;
1616
1617 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1618 if (err < 0)
1619 return err;
1620
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001621 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1622 if (err < 0)
1623 return err;
1624
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001625 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1626 if (err < 0)
1627 return err;
1628
1629 /* Only create debugfs entries during the initial setup
1630 * phase and not every time the controller gets powered on.
1631 */
1632 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1633 return 0;
1634
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001635 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1636 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001637 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1638 &hdev->manufacturer);
1639 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1640 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001641 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1642 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001643 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1644
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001645 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1646 &conn_info_min_age_fops);
1647 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1648 &conn_info_max_age_fops);
1649
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001650 if (lmp_bredr_capable(hdev)) {
1651 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1652 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001653 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1654 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001655 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1656 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001657 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1658 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001659 }
1660
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001661 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001662 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1663 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001664 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1665 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001666 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1667 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001668 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001669
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001670 if (lmp_sniff_capable(hdev)) {
1671 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1672 hdev, &idle_timeout_fops);
1673 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1674 hdev, &sniff_min_interval_fops);
1675 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1676 hdev, &sniff_max_interval_fops);
1677 }
1678
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001679 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001680 debugfs_create_file("identity", 0400, hdev->debugfs,
1681 hdev, &identity_fops);
1682 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1683 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001684 debugfs_create_file("random_address", 0444, hdev->debugfs,
1685 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001686 debugfs_create_file("static_address", 0444, hdev->debugfs,
1687 hdev, &static_address_fops);
1688
1689 /* For controllers with a public address, provide a debug
1690 * option to force the usage of the configured static
1691 * address. By default the public address is used.
1692 */
1693 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1694 debugfs_create_file("force_static_address", 0644,
1695 hdev->debugfs, hdev,
1696 &force_static_address_fops);
1697
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001698 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1699 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001700 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1701 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001702 debugfs_create_file("identity_resolving_keys", 0400,
1703 hdev->debugfs, hdev,
1704 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001705 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1706 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001707 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1708 hdev, &conn_min_interval_fops);
1709 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1710 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001711 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1712 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001713 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1714 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001715 debugfs_create_u16("discov_interleaved_timeout", 0644,
1716 hdev->debugfs,
1717 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001718 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001719
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001720 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001721}
1722
Johan Hedberg42c6b122013-03-05 20:37:49 +02001723static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724{
1725 __u8 scan = opt;
1726
Johan Hedberg42c6b122013-03-05 20:37:49 +02001727 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728
1729 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001730 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731}
1732
Johan Hedberg42c6b122013-03-05 20:37:49 +02001733static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734{
1735 __u8 auth = opt;
1736
Johan Hedberg42c6b122013-03-05 20:37:49 +02001737 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
1739 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001740 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741}
1742
Johan Hedberg42c6b122013-03-05 20:37:49 +02001743static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744{
1745 __u8 encrypt = opt;
1746
Johan Hedberg42c6b122013-03-05 20:37:49 +02001747 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001749 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001750 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751}
1752
Johan Hedberg42c6b122013-03-05 20:37:49 +02001753static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001754{
1755 __le16 policy = cpu_to_le16(opt);
1756
Johan Hedberg42c6b122013-03-05 20:37:49 +02001757 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001758
1759 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001760 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001761}
1762
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001763/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 * Device is held on return. */
1765struct hci_dev *hci_dev_get(int index)
1766{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001767 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
1769 BT_DBG("%d", index);
1770
1771 if (index < 0)
1772 return NULL;
1773
1774 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001775 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 if (d->id == index) {
1777 hdev = hci_dev_hold(d);
1778 break;
1779 }
1780 }
1781 read_unlock(&hci_dev_list_lock);
1782 return hdev;
1783}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
1785/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001786
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001787bool hci_discovery_active(struct hci_dev *hdev)
1788{
1789 struct discovery_state *discov = &hdev->discovery;
1790
Andre Guedes6fbe1952012-02-03 17:47:58 -03001791 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001792 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001793 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001794 return true;
1795
Andre Guedes6fbe1952012-02-03 17:47:58 -03001796 default:
1797 return false;
1798 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001799}
1800
/* Transition the discovery state machine to @state and notify the
 * management interface when discovery effectively starts or stops.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op if the state does not actually change */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Discovery no longer needs the radio; re-evaluate whether
		 * background scanning should be running.
		 */
		hci_update_background_scan(hdev);

		/* Only report "discovering stopped" if discovery had been
		 * reported as started (i.e. we are not aborting a pending
		 * DISCOVERY_STARTING transition).
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		/* Scanning has actually begun: report it to userspace */
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1828
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001829void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830{
Johan Hedberg30883512012-01-04 14:16:21 +02001831 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001832 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833
Johan Hedberg561aafb2012-01-04 13:31:59 +02001834 list_for_each_entry_safe(p, n, &cache->all, all) {
1835 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001836 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001838
1839 INIT_LIST_HEAD(&cache->unknown);
1840 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841}
1842
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001843struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1844 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845{
Johan Hedberg30883512012-01-04 14:16:21 +02001846 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 struct inquiry_entry *e;
1848
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001849 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850
Johan Hedberg561aafb2012-01-04 13:31:59 +02001851 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001853 return e;
1854 }
1855
1856 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857}
1858
Johan Hedberg561aafb2012-01-04 13:31:59 +02001859struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001860 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001861{
Johan Hedberg30883512012-01-04 14:16:21 +02001862 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001863 struct inquiry_entry *e;
1864
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001865 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001866
1867 list_for_each_entry(e, &cache->unknown, list) {
1868 if (!bacmp(&e->data.bdaddr, bdaddr))
1869 return e;
1870 }
1871
1872 return NULL;
1873}
1874
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001875struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001876 bdaddr_t *bdaddr,
1877 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001878{
1879 struct discovery_state *cache = &hdev->discovery;
1880 struct inquiry_entry *e;
1881
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001882 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001883
1884 list_for_each_entry(e, &cache->resolve, list) {
1885 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1886 return e;
1887 if (!bacmp(&e->data.bdaddr, bdaddr))
1888 return e;
1889 }
1890
1891 return NULL;
1892}
1893
/* Re-insert @ie into the resolve list at its proper position: entries
 * with a name request already pending stay in front, and the remaining
 * entries are kept ordered by signal strength (smallest |RSSI| first).
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Take the entry off the list before finding its new position */
	list_del(&ie->list);

	/* Advance past every entry that must stay ahead of @ie: entries
	 * whose name request is pending, and entries with a stronger
	 * signal (smaller |RSSI|). @pos ends up as the insertion point.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* list_add() inserts after @pos */
	list_add(&ie->list, pos);
}
1912
/* Add a new inquiry result to the cache, or refresh an existing entry.
 *
 * *ssp is set to reflect whether the device reported Simple Pairing
 * support (ssp_mode), in this result or a previously cached one.
 * Returns true if the remote name is known (no name resolution
 * needed), false otherwise.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates stored OOB pairing data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support remembered from an earlier result sticks */
		if (ie->data.ssp_mode)
			*ssp = true;

		/* If the RSSI changed while a name lookup is still wanted,
		 * re-sort the entry's position in the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		/* Track the entry so its name can be resolved later */
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: promote the entry and drop it from the
	 * unknown/resolve bookkeeping list (unless a request is pending).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1969
1970static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1971{
Johan Hedberg30883512012-01-04 14:16:21 +02001972 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 struct inquiry_info *info = (struct inquiry_info *) buf;
1974 struct inquiry_entry *e;
1975 int copied = 0;
1976
Johan Hedberg561aafb2012-01-04 13:31:59 +02001977 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001979
1980 if (copied >= num)
1981 break;
1982
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 bacpy(&info->bdaddr, &data->bdaddr);
1984 info->pscan_rep_mode = data->pscan_rep_mode;
1985 info->pscan_period_mode = data->pscan_period_mode;
1986 info->pscan_mode = data->pscan_mode;
1987 memcpy(info->dev_class, data->dev_class, 3);
1988 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001989
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001991 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 }
1993
1994 BT_DBG("cache %p, copied %d", cache, copied);
1995 return copied;
1996}
1997
/* HCI request callback: queue an Inquiry command built from the
 * struct hci_inquiry_req smuggled through @opt. Does nothing if an
 * inquiry is already running.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	/* Only one inquiry may be in progress at a time */
	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
2015
/* Action callback for wait_on_bit(): yield the CPU while the bit is
 * set and report whether a signal interrupted the wait (non-zero
 * aborts the wait with an error).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
2021
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022int hci_inquiry(void __user *arg)
2023{
2024 __u8 __user *ptr = arg;
2025 struct hci_inquiry_req ir;
2026 struct hci_dev *hdev;
2027 int err = 0, do_inquiry = 0, max_rsp;
2028 long timeo;
2029 __u8 *buf;
2030
2031 if (copy_from_user(&ir, ptr, sizeof(ir)))
2032 return -EFAULT;
2033
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002034 hdev = hci_dev_get(ir.dev_id);
2035 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 return -ENODEV;
2037
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002038 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2039 err = -EBUSY;
2040 goto done;
2041 }
2042
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002043 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2044 err = -EOPNOTSUPP;
2045 goto done;
2046 }
2047
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002048 if (hdev->dev_type != HCI_BREDR) {
2049 err = -EOPNOTSUPP;
2050 goto done;
2051 }
2052
Johan Hedberg56f87902013-10-02 13:43:13 +03002053 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2054 err = -EOPNOTSUPP;
2055 goto done;
2056 }
2057
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002058 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002059 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002060 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002061 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 do_inquiry = 1;
2063 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002064 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
Marcel Holtmann04837f62006-07-03 10:02:33 +02002066 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002067
2068 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002069 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2070 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002071 if (err < 0)
2072 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002073
2074 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2075 * cleared). If it is interrupted by a signal, return -EINTR.
2076 */
2077 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2078 TASK_INTERRUPTIBLE))
2079 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002080 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002082 /* for unlimited number of responses we will use buffer with
2083 * 255 entries
2084 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2086
2087 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2088 * copy it to the user space.
2089 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002090 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002091 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 err = -ENOMEM;
2093 goto done;
2094 }
2095
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002096 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002098 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099
2100 BT_DBG("num_rsp %d", ir.num_rsp);
2101
2102 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2103 ptr += sizeof(ir);
2104 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002105 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002107 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 err = -EFAULT;
2109
2110 kfree(buf);
2111
2112done:
2113 hci_dev_put(hdev);
2114 return err;
2115}
2116
/* Power on an HCI device: run the driver open callback, the optional
 * driver setup stage and the HCI initialization sequence, then mark
 * the device up and notify listeners. On any failure the device is
 * fully torn down again. Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to open a device that is being unregistered */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Driver-level open (transport bring-up) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Allow one command in flight during initialization */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the initial SETUP stage */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Raw-only and user-channel devices skip the standard
		 * HCI initialization sequence.
		 */
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		/* Success: mark the device up and announce it */
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		/* Keep only the HCI_RAW bit across the failed open */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2221
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002222/* ---- HCI ioctl helpers ---- */
2223
/* HCIDEVUP ioctl helper: resolve the device index, validate that a
 * normal power-up is permitted, and delegate to hci_dev_do_open().
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked for raw-only usage can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2268
/* Power down an HCI device: cancel pending work, flush all queues and
 * connections, optionally issue an HCI Reset, close the driver and
 * clear volatile state. Safe to call on a device that is already
 * down (returns 0 after canceling the command timer).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	/* Abort any pending synchronous request before taking the lock */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Going down cancels any discoverable-mode timeout */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Flush caches and drop all connections under the hdev lock */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb() handles a NULL recv_evt */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	/* Wipe state that must not survive a power cycle */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2371
/* HCIDEVDOWN ioctl helper: resolve the device index and power the
 * device down via hci_dev_do_close(). Returns 0 on success or a
 * negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* User-channel devices are controlled by their owner, not ioctl */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* An explicit close supersedes any scheduled auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2395
/* Handle HCIDEVRESET: drop all queued traffic and connection state of
 * an up-and-running controller and issue an HCI Reset command.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* Devices bound to a user channel are owned by userspace */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Raw-only devices cannot run the HCI command below */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters before issuing the reset request */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2444
2445int hci_dev_reset_stat(__u16 dev)
2446{
2447 struct hci_dev *hdev;
2448 int ret = 0;
2449
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002450 hdev = hci_dev_get(dev);
2451 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 return -ENODEV;
2453
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002454 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2455 ret = -EBUSY;
2456 goto done;
2457 }
2458
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002459 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2460 ret = -EOPNOTSUPP;
2461 goto done;
2462 }
2463
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2465
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002466done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 return ret;
2469}
2470
/* Handle the legacy HCISET* ioctls that tweak a single controller
 * setting (auth, encryption, scan mode, link policy/mode, packet
 * types and MTUs). Only supported for enabled BR/EDR controllers
 * that are not raw-only and not bound to a user channel.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two 16-bit halves: the
	 * packet count in the low half and the MTU in the high half.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2566
/* Handle HCIGETDEVLIST: copy the id and flags of up to dev_num
 * registered controllers to userspace.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation to two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing devices counts as userspace activity, so a
		 * pending automatic power-off is cancelled.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2613
/* Handle HCIGETDEVINFO: copy a snapshot of one controller's state to
 * userspace.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as userspace activity, so a
	 * pending automatic power-off is cancelled.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the next */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: report the LE buffer info in the ACL fields,
		 * there is no SCO transport.
		 */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2662
2663/* ---- Interface to HCI drivers ---- */
2664
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002665static int hci_rfkill_set_block(void *data, bool blocked)
2666{
2667 struct hci_dev *hdev = data;
2668
2669 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2670
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002671 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2672 return -EBUSY;
2673
Johan Hedberg5e130362013-09-13 08:58:17 +03002674 if (blocked) {
2675 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002676 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2677 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002678 } else {
2679 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002680 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002681
2682 return 0;
2683}
2684
/* rfkill operations for the per-controller kill switch */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2688
/* Deferred work that powers a controller on. Re-checks the error
 * conditions that are deliberately ignored during setup and powers
 * the device back off if any of them still hold.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-powered devices are shut down again unless
		 * userspace shows interest before the timeout.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			mgmt_index_added(hdev);
	}
}
2722
2723static void hci_power_off(struct work_struct *work)
2724{
Johan Hedberg32435532011-11-07 22:16:04 +02002725 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002726 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002727
2728 BT_DBG("%s", hdev->name);
2729
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002730 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002731}
2732
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002733static void hci_discov_off(struct work_struct *work)
2734{
2735 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002736
2737 hdev = container_of(work, struct hci_dev, discov_off.work);
2738
2739 BT_DBG("%s", hdev->name);
2740
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002741 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002742}
2743
Johan Hedberg35f74982014-02-18 17:14:32 +02002744void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002745{
Johan Hedberg48210022013-01-27 00:31:28 +02002746 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002747
Johan Hedberg48210022013-01-27 00:31:28 +02002748 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2749 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002750 kfree(uuid);
2751 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002752}
2753
Johan Hedberg35f74982014-02-18 17:14:32 +02002754void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002755{
2756 struct list_head *p, *n;
2757
2758 list_for_each_safe(p, n, &hdev->link_keys) {
2759 struct link_key *key;
2760
2761 key = list_entry(p, struct link_key, list);
2762
2763 list_del(p);
2764 kfree(key);
2765 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002766}
2767
Johan Hedberg35f74982014-02-18 17:14:32 +02002768void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002769{
2770 struct smp_ltk *k, *tmp;
2771
2772 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2773 list_del(&k->list);
2774 kfree(k);
2775 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002776}
2777
Johan Hedberg970c4e42014-02-18 10:19:33 +02002778void hci_smp_irks_clear(struct hci_dev *hdev)
2779{
2780 struct smp_irk *k, *tmp;
2781
2782 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2783 list_del(&k->list);
2784 kfree(k);
2785 }
2786}
2787
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002788struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2789{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002790 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002791
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002792 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002793 if (bacmp(bdaddr, &k->bdaddr) == 0)
2794 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002795
2796 return NULL;
2797}
2798
/* Decide whether a newly created link key should be stored
 * persistently (true) or only kept for the lifetime of the
 * connection (false), based on key type and bonding requirements.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2834
Johan Hedberg98a0b842014-01-30 19:40:00 -08002835static bool ltk_type_master(u8 type)
2836{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03002837 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08002838}
2839
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002840struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002841 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002842{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002843 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002844
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002845 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002846 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002847 continue;
2848
Johan Hedberg98a0b842014-01-30 19:40:00 -08002849 if (ltk_type_master(k->type) != master)
2850 continue;
2851
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002852 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002853 }
2854
2855 return NULL;
2856}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002857
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002858struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002859 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002860{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002861 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002862
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002863 list_for_each_entry(k, &hdev->long_term_keys, list)
2864 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002865 bacmp(bdaddr, &k->bdaddr) == 0 &&
2866 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002867 return k;
2868
2869 return NULL;
2870}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002871
/* Find the IRK whose owner generated the given Resolvable Private
 * Address, or NULL if no stored IRK resolves it.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	/* Fast path: the RPA was resolved before and is still cached */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	/* Slow path: try to resolve the RPA against every stored IRK,
	 * caching it on the matching entry for the next lookup.
	 */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
2890
2891struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2892 u8 addr_type)
2893{
2894 struct smp_irk *irk;
2895
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002896 /* Identity Address must be public or static random */
2897 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2898 return NULL;
2899
Johan Hedberg970c4e42014-02-18 10:19:33 +02002900 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2901 if (addr_type == irk->addr_type &&
2902 bacmp(bdaddr, &irk->bdaddr) == 0)
2903 return irk;
2904 }
2905
2906 return NULL;
2907}
2908
/* Create or update the stored link key for bdaddr. Returns the key
 * entry, or NULL on allocation failure. When @persistent is non-NULL
 * it is set to whether the key should be stored permanently.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the decisions below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the type of the key it
	 * replaced (which may be 0xff if there was none).
	 */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2955
Johan Hedbergca9142b2014-02-19 14:57:44 +02002956struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002957 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002958 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002959{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002960 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002961 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002962
Johan Hedberg98a0b842014-01-30 19:40:00 -08002963 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002964 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002965 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002966 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002967 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002968 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002969 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002970 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002971 }
2972
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002973 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002974 key->bdaddr_type = addr_type;
2975 memcpy(key->val, tk, sizeof(key->val));
2976 key->authenticated = authenticated;
2977 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002978 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002979 key->enc_size = enc_size;
2980 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002981
Johan Hedbergca9142b2014-02-19 14:57:44 +02002982 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002983}
2984
Johan Hedbergca9142b2014-02-19 14:57:44 +02002985struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2986 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002987{
2988 struct smp_irk *irk;
2989
2990 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2991 if (!irk) {
2992 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2993 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002994 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002995
2996 bacpy(&irk->bdaddr, bdaddr);
2997 irk->addr_type = addr_type;
2998
2999 list_add(&irk->list, &hdev->identity_resolving_keys);
3000 }
3001
3002 memcpy(irk->val, val, 16);
3003 bacpy(&irk->rpa, rpa);
3004
Johan Hedbergca9142b2014-02-19 14:57:44 +02003005 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003006}
3007
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003008int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3009{
3010 struct link_key *key;
3011
3012 key = hci_find_link_key(hdev, bdaddr);
3013 if (!key)
3014 return -ENOENT;
3015
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003016 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003017
3018 list_del(&key->list);
3019 kfree(key);
3020
3021 return 0;
3022}
3023
Johan Hedberge0b2b272014-02-18 17:14:31 +02003024int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003025{
3026 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003027 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003028
3029 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003030 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003031 continue;
3032
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003033 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003034
3035 list_del(&k->list);
3036 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003037 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003038 }
3039
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003040 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003041}
3042
Johan Hedberga7ec7332014-02-18 17:14:35 +02003043void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3044{
3045 struct smp_irk *k, *tmp;
3046
Johan Hedberg668b7b12014-02-21 16:03:31 +02003047 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003048 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3049 continue;
3050
3051 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3052
3053 list_del(&k->list);
3054 kfree(k);
3055 }
3056}
3057
Ville Tervo6bd32322011-02-16 16:32:41 +02003058/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003059static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003060{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003061 struct hci_dev *hdev = container_of(work, struct hci_dev,
3062 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003063
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003064 if (hdev->sent_cmd) {
3065 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3066 u16 opcode = __le16_to_cpu(sent->opcode);
3067
3068 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3069 } else {
3070 BT_ERR("%s command tx timeout", hdev->name);
3071 }
3072
Ville Tervo6bd32322011-02-16 16:32:41 +02003073 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003074 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003075}
3076
Szymon Janc2763eda2011-03-22 13:12:22 +01003077struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003078 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003079{
3080 struct oob_data *data;
3081
3082 list_for_each_entry(data, &hdev->remote_oob_data, list)
3083 if (bacmp(bdaddr, &data->bdaddr) == 0)
3084 return data;
3085
3086 return NULL;
3087}
3088
3089int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3090{
3091 struct oob_data *data;
3092
3093 data = hci_find_remote_oob_data(hdev, bdaddr);
3094 if (!data)
3095 return -ENOENT;
3096
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003097 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003098
3099 list_del(&data->list);
3100 kfree(data);
3101
3102 return 0;
3103}
3104
Johan Hedberg35f74982014-02-18 17:14:32 +02003105void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003106{
3107 struct oob_data *data, *n;
3108
3109 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3110 list_del(&data->list);
3111 kfree(data);
3112 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003113}
3114
/* Store P-192 OOB data (hash and randomizer) for a remote device,
 * creating the entry if needed. Any previously stored P-256 values
 * are zeroed since only P-192 data is being provided.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* Invalidate stale P-256 values from an earlier ext-data call */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3140
3141int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3142 u8 *hash192, u8 *randomizer192,
3143 u8 *hash256, u8 *randomizer256)
3144{
3145 struct oob_data *data;
3146
3147 data = hci_find_remote_oob_data(hdev, bdaddr);
3148 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003149 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003150 if (!data)
3151 return -ENOMEM;
3152
3153 bacpy(&data->bdaddr, bdaddr);
3154 list_add(&data->list, &hdev->remote_oob_data);
3155 }
3156
3157 memcpy(data->hash192, hash192, sizeof(data->hash192));
3158 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3159
3160 memcpy(data->hash256, hash256, sizeof(data->hash256));
3161 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3162
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003163 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003164
3165 return 0;
3166}
3167
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003168struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3169 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003170{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003171 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003172
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003173 list_for_each_entry(b, &hdev->blacklist, list) {
3174 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003175 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003176 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003177
3178 return NULL;
3179}
3180
Marcel Holtmannc9507492014-02-27 19:35:54 -08003181static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003182{
3183 struct list_head *p, *n;
3184
3185 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003186 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003187
3188 list_del(p);
3189 kfree(b);
3190 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003191}
3192
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003193int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003194{
3195 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003196
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003197 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003198 return -EBADF;
3199
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003200 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003201 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003202
3203 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003204 if (!entry)
3205 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003206
3207 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003208 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003209
3210 list_add(&entry->list, &hdev->blacklist);
3211
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003212 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003213}
3214
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003215int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003216{
3217 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003218
Johan Hedberg35f74982014-02-18 17:14:32 +02003219 if (!bacmp(bdaddr, BDADDR_ANY)) {
3220 hci_blacklist_clear(hdev);
3221 return 0;
3222 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003223
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003224 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003225 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003226 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003227
3228 list_del(&entry->list);
3229 kfree(entry);
3230
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003231 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003232}
3233
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003234struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3235 bdaddr_t *bdaddr, u8 type)
3236{
3237 struct bdaddr_list *b;
3238
3239 list_for_each_entry(b, &hdev->le_white_list, list) {
3240 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3241 return b;
3242 }
3243
3244 return NULL;
3245}
3246
3247void hci_white_list_clear(struct hci_dev *hdev)
3248{
3249 struct list_head *p, *n;
3250
3251 list_for_each_safe(p, n, &hdev->le_white_list) {
3252 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3253
3254 list_del(p);
3255 kfree(b);
3256 }
3257}
3258
3259int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3260{
3261 struct bdaddr_list *entry;
3262
3263 if (!bacmp(bdaddr, BDADDR_ANY))
3264 return -EBADF;
3265
3266 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3267 if (!entry)
3268 return -ENOMEM;
3269
3270 bacpy(&entry->bdaddr, bdaddr);
3271 entry->bdaddr_type = type;
3272
3273 list_add(&entry->list, &hdev->le_white_list);
3274
3275 return 0;
3276}
3277
3278int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3279{
3280 struct bdaddr_list *entry;
3281
3282 if (!bacmp(bdaddr, BDADDR_ANY))
3283 return -EBADF;
3284
3285 entry = hci_white_list_lookup(hdev, bdaddr, type);
3286 if (!entry)
3287 return -ENOENT;
3288
3289 list_del(&entry->list);
3290 kfree(entry);
3291
3292 return 0;
3293}
3294
Andre Guedes15819a72014-02-03 13:56:18 -03003295/* This function requires the caller holds hdev->lock */
3296struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3297 bdaddr_t *addr, u8 addr_type)
3298{
3299 struct hci_conn_params *params;
3300
3301 list_for_each_entry(params, &hdev->le_conn_params, list) {
3302 if (bacmp(&params->addr, addr) == 0 &&
3303 params->addr_type == addr_type) {
3304 return params;
3305 }
3306 }
3307
3308 return NULL;
3309}
3310
Andre Guedescef952c2014-02-26 20:21:49 -03003311static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3312{
3313 struct hci_conn *conn;
3314
3315 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3316 if (!conn)
3317 return false;
3318
3319 if (conn->dst_type != type)
3320 return false;
3321
3322 if (conn->state != BT_CONNECTED)
3323 return false;
3324
3325 return true;
3326}
3327
Andre Guedesa9b0a042014-02-26 20:21:52 -03003328static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3329{
3330 if (addr_type == ADDR_LE_DEV_PUBLIC)
3331 return true;
3332
3333 /* Check for Random Static address type */
3334 if ((addr->b[5] & 0xc0) == 0xc0)
3335 return true;
3336
3337 return false;
3338}
3339
Andre Guedes15819a72014-02-03 13:56:18 -03003340/* This function requires the caller holds hdev->lock */
Marcel Holtmann4b109662014-06-29 13:41:49 +02003341struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3342 bdaddr_t *addr, u8 addr_type)
3343{
3344 struct bdaddr_list *entry;
3345
3346 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3347 if (bacmp(&entry->bdaddr, addr) == 0 &&
3348 entry->bdaddr_type == addr_type)
3349 return entry;
3350 }
3351
3352 return NULL;
3353}
3354
3355/* This function requires the caller holds hdev->lock */
3356void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3357{
3358 struct bdaddr_list *entry;
3359
3360 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3361 if (entry)
3362 goto done;
3363
3364 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3365 if (!entry) {
3366 BT_ERR("Out of memory");
3367 return;
3368 }
3369
3370 bacpy(&entry->bdaddr, addr);
3371 entry->bdaddr_type = addr_type;
3372
3373 list_add(&entry->list, &hdev->pend_le_conns);
3374
3375 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3376
3377done:
3378 hci_update_background_scan(hdev);
3379}
3380
3381/* This function requires the caller holds hdev->lock */
3382void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3383{
3384 struct bdaddr_list *entry;
3385
3386 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3387 if (!entry)
3388 goto done;
3389
3390 list_del(&entry->list);
3391 kfree(entry);
3392
3393 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3394
3395done:
3396 hci_update_background_scan(hdev);
3397}
3398
3399/* This function requires the caller holds hdev->lock */
3400void hci_pend_le_conns_clear(struct hci_dev *hdev)
3401{
3402 struct bdaddr_list *entry, *tmp;
3403
3404 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3405 list_del(&entry->list);
3406 kfree(entry);
3407 }
3408
3409 BT_DBG("All LE pending connections cleared");
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02003410
3411 hci_update_background_scan(hdev);
Marcel Holtmann4b109662014-06-29 13:41:49 +02003412}
3413
/* This function requires the caller holds hdev->lock */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	/* Only identity addresses (public, or random static as checked
	 * by is_identity_address) may key a parameter entry.
	 */
	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	/* Update an existing entry in place if there is one. */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	/* Intervals come from the caller; latency and supervision
	 * timeout are taken from the controller-wide defaults.
	 */
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = auto_connect;

	/* Keep the pending LE connection list consistent with the new
	 * auto-connect policy: drop the entry when auto-connect is off
	 * (or link-loss only), add one when it is always-on and no
	 * connection currently exists.
	 */
	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}
3463
3464/* This function requires the caller holds hdev->lock */
3465void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3466{
3467 struct hci_conn_params *params;
3468
3469 params = hci_conn_params_lookup(hdev, addr, addr_type);
3470 if (!params)
3471 return;
3472
Andre Guedescef952c2014-02-26 20:21:49 -03003473 hci_pend_le_conn_del(hdev, addr, addr_type);
3474
Andre Guedes15819a72014-02-03 13:56:18 -03003475 list_del(&params->list);
3476 kfree(params);
3477
3478 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3479}
3480
3481/* This function requires the caller holds hdev->lock */
3482void hci_conn_params_clear(struct hci_dev *hdev)
3483{
3484 struct hci_conn_params *params, *tmp;
3485
3486 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3487 list_del(&params->list);
3488 kfree(params);
3489 }
3490
Marcel Holtmann1089b672014-06-29 13:41:50 +02003491 hci_pend_le_conns_clear(hdev);
3492
Andre Guedes15819a72014-02-03 13:56:18 -03003493 BT_DBG("All LE connection parameters were removed");
3494}
3495
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003496static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003497{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003498 if (status) {
3499 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003500
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003501 hci_dev_lock(hdev);
3502 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3503 hci_dev_unlock(hdev);
3504 return;
3505 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003506}
3507
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003508static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003509{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003510 /* General inquiry access code (GIAC) */
3511 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3512 struct hci_request req;
3513 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003514 int err;
3515
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003516 if (status) {
3517 BT_ERR("Failed to disable LE scanning: status %d", status);
3518 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003519 }
3520
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003521 switch (hdev->discovery.type) {
3522 case DISCOV_TYPE_LE:
3523 hci_dev_lock(hdev);
3524 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3525 hci_dev_unlock(hdev);
3526 break;
3527
3528 case DISCOV_TYPE_INTERLEAVED:
3529 hci_req_init(&req, hdev);
3530
3531 memset(&cp, 0, sizeof(cp));
3532 memcpy(&cp.lap, lap, sizeof(cp.lap));
3533 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3534 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3535
3536 hci_dev_lock(hdev);
3537
3538 hci_inquiry_cache_flush(hdev);
3539
3540 err = hci_req_run(&req, inquiry_complete);
3541 if (err) {
3542 BT_ERR("Inquiry request failed: err %d", err);
3543 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3544 }
3545
3546 hci_dev_unlock(hdev);
3547 break;
3548 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003549}
3550
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003551static void le_scan_disable_work(struct work_struct *work)
3552{
3553 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003554 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003555 struct hci_request req;
3556 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003557
3558 BT_DBG("%s", hdev->name);
3559
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003560 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003561
Andre Guedesb1efcc22014-02-26 20:21:40 -03003562 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003563
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003564 err = hci_req_run(&req, le_scan_disable_work_complete);
3565 if (err)
3566 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003567}
3568
Johan Hedberg8d972502014-02-28 12:54:14 +02003569static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3570{
3571 struct hci_dev *hdev = req->hdev;
3572
3573 /* If we're advertising or initiating an LE connection we can't
3574 * go ahead and change the random address at this time. This is
3575 * because the eventual initiator address used for the
3576 * subsequently created connection will be undefined (some
3577 * controllers use the new address and others the one we had
3578 * when the operation started).
3579 *
3580 * In this kind of scenario skip the update and let the random
3581 * address be updated at the next cycle.
3582 */
3583 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3584 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3585 BT_DBG("Deferring random address update");
3586 return;
3587 }
3588
3589 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3590}
3591
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003592int hci_update_random_address(struct hci_request *req, bool require_privacy,
3593 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003594{
3595 struct hci_dev *hdev = req->hdev;
3596 int err;
3597
3598 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003599 * current RPA has expired or there is something else than
3600 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003601 */
3602 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003603 int to;
3604
3605 *own_addr_type = ADDR_LE_DEV_RANDOM;
3606
3607 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003608 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003609 return 0;
3610
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003611 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003612 if (err < 0) {
3613 BT_ERR("%s failed to generate new RPA", hdev->name);
3614 return err;
3615 }
3616
Johan Hedberg8d972502014-02-28 12:54:14 +02003617 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003618
3619 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3620 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3621
3622 return 0;
3623 }
3624
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003625 /* In case of required privacy without resolvable private address,
3626 * use an unresolvable private address. This is useful for active
3627 * scanning and non-connectable advertising.
3628 */
3629 if (require_privacy) {
3630 bdaddr_t urpa;
3631
3632 get_random_bytes(&urpa, 6);
3633 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3634
3635 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003636 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003637 return 0;
3638 }
3639
Johan Hedbergebd3a742014-02-23 19:42:21 +02003640 /* If forcing static address is in use or there is no public
3641 * address use the static address as random address (but skip
3642 * the HCI command if the current random address is already the
3643 * static one.
3644 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003645 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003646 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3647 *own_addr_type = ADDR_LE_DEV_RANDOM;
3648 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3649 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3650 &hdev->static_addr);
3651 return 0;
3652 }
3653
3654 /* Neither privacy nor static address is being used so use a
3655 * public address.
3656 */
3657 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3658
3659 return 0;
3660}
3661
Johan Hedberga1f4c312014-02-27 14:05:41 +02003662/* Copy the Identity Address of the controller.
3663 *
3664 * If the controller has a public BD_ADDR, then by default use that one.
3665 * If this is a LE only controller without a public address, default to
3666 * the static random address.
3667 *
3668 * For debugging purposes it is possible to force controllers with a
3669 * public address to use the static random address instead.
3670 */
3671void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3672 u8 *bdaddr_type)
3673{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003674 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003675 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3676 bacpy(bdaddr, &hdev->static_addr);
3677 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3678 } else {
3679 bacpy(bdaddr, &hdev->bdaddr);
3680 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3681 }
3682}
3683
David Herrmann9be0dab2012-04-22 14:39:57 +02003684/* Alloc HCI device */
3685struct hci_dev *hci_alloc_dev(void)
3686{
3687 struct hci_dev *hdev;
3688
3689 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3690 if (!hdev)
3691 return NULL;
3692
David Herrmannb1b813d2012-04-22 14:39:58 +02003693 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3694 hdev->esco_type = (ESCO_HV1);
3695 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003696 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3697 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003698 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3699 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003700
David Herrmannb1b813d2012-04-22 14:39:58 +02003701 hdev->sniff_max_interval = 800;
3702 hdev->sniff_min_interval = 80;
3703
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003704 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003705 hdev->le_scan_interval = 0x0060;
3706 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003707 hdev->le_conn_min_interval = 0x0028;
3708 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003709 hdev->le_conn_latency = 0x0000;
3710 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003711
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003712 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003713 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003714 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3715 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003716
David Herrmannb1b813d2012-04-22 14:39:58 +02003717 mutex_init(&hdev->lock);
3718 mutex_init(&hdev->req_lock);
3719
3720 INIT_LIST_HEAD(&hdev->mgmt_pending);
3721 INIT_LIST_HEAD(&hdev->blacklist);
3722 INIT_LIST_HEAD(&hdev->uuids);
3723 INIT_LIST_HEAD(&hdev->link_keys);
3724 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003725 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003726 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003727 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003728 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003729 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003730 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003731
3732 INIT_WORK(&hdev->rx_work, hci_rx_work);
3733 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3734 INIT_WORK(&hdev->tx_work, hci_tx_work);
3735 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003736
David Herrmannb1b813d2012-04-22 14:39:58 +02003737 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3738 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3739 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3740
David Herrmannb1b813d2012-04-22 14:39:58 +02003741 skb_queue_head_init(&hdev->rx_q);
3742 skb_queue_head_init(&hdev->cmd_q);
3743 skb_queue_head_init(&hdev->raw_q);
3744
3745 init_waitqueue_head(&hdev->req_wait_q);
3746
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003747 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003748
David Herrmannb1b813d2012-04-22 14:39:58 +02003749 hci_init_sysfs(hdev);
3750 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003751
3752 return hdev;
3753}
3754EXPORT_SYMBOL(hci_alloc_dev);
3755
3756/* Free HCI device */
3757void hci_free_dev(struct hci_dev *hdev)
3758{
David Herrmann9be0dab2012-04-22 14:39:57 +02003759 /* will free via device release */
3760 put_device(&hdev->dev);
3761}
3762EXPORT_SYMBOL(hci_free_dev);
3763
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764/* Register HCI device */
3765int hci_register_dev(struct hci_dev *hdev)
3766{
David Herrmannb1b813d2012-04-22 14:39:58 +02003767 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768
David Herrmann010666a2012-01-07 15:47:07 +01003769 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 return -EINVAL;
3771
Mat Martineau08add512011-11-02 16:18:36 -07003772 /* Do not allow HCI_AMP devices to register at index 0,
3773 * so the index can be used as the AMP controller ID.
3774 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003775 switch (hdev->dev_type) {
3776 case HCI_BREDR:
3777 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3778 break;
3779 case HCI_AMP:
3780 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3781 break;
3782 default:
3783 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003785
Sasha Levin3df92b32012-05-27 22:36:56 +02003786 if (id < 0)
3787 return id;
3788
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789 sprintf(hdev->name, "hci%d", id);
3790 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003791
3792 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3793
Kees Cookd8537542013-07-03 15:04:57 -07003794 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3795 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003796 if (!hdev->workqueue) {
3797 error = -ENOMEM;
3798 goto err;
3799 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003800
Kees Cookd8537542013-07-03 15:04:57 -07003801 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3802 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003803 if (!hdev->req_workqueue) {
3804 destroy_workqueue(hdev->workqueue);
3805 error = -ENOMEM;
3806 goto err;
3807 }
3808
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003809 if (!IS_ERR_OR_NULL(bt_debugfs))
3810 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3811
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003812 dev_set_name(&hdev->dev, "%s", hdev->name);
3813
Johan Hedberg99780a72014-02-18 10:40:07 +02003814 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3815 CRYPTO_ALG_ASYNC);
3816 if (IS_ERR(hdev->tfm_aes)) {
3817 BT_ERR("Unable to create crypto context");
3818 error = PTR_ERR(hdev->tfm_aes);
3819 hdev->tfm_aes = NULL;
3820 goto err_wqueue;
3821 }
3822
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003823 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003824 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003825 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003827 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003828 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3829 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003830 if (hdev->rfkill) {
3831 if (rfkill_register(hdev->rfkill) < 0) {
3832 rfkill_destroy(hdev->rfkill);
3833 hdev->rfkill = NULL;
3834 }
3835 }
3836
Johan Hedberg5e130362013-09-13 08:58:17 +03003837 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3838 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3839
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003840 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003841 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003842
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003843 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003844 /* Assume BR/EDR support until proven otherwise (such as
3845 * through reading supported features during init.
3846 */
3847 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3848 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003849
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003850 write_lock(&hci_dev_list_lock);
3851 list_add(&hdev->list, &hci_dev_list);
3852 write_unlock(&hci_dev_list_lock);
3853
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003854 /* Devices that are marked for raw-only usage need to set
3855 * the HCI_RAW flag to indicate that only user channel is
3856 * supported.
3857 */
3858 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3859 set_bit(HCI_RAW, &hdev->flags);
3860
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003862 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863
Johan Hedberg19202572013-01-14 22:33:51 +02003864 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003865
Linus Torvalds1da177e2005-04-16 15:20:36 -07003866 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003867
Johan Hedberg99780a72014-02-18 10:40:07 +02003868err_tfm:
3869 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003870err_wqueue:
3871 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003872 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003873err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003874 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003875
David Herrmann33ca9542011-10-08 14:58:49 +02003876 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877}
3878EXPORT_SYMBOL(hci_register_dev);
3879
/* Unregister HCI device */
/* Tear down a registered hci_dev: unlink it from the global list,
 * close it, notify mgmt, release rfkill/crypto/sysfs/debugfs and the
 * workqueues, clear all per-device lists, drop the reference taken at
 * registration and return the index to the IDA.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Flag first so concurrent paths see the device going away. */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put below. */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Release all per-device bookkeeping under the device lock. */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3947
3948/* Suspend HCI device */
3949int hci_suspend_dev(struct hci_dev *hdev)
3950{
3951 hci_notify(hdev, HCI_DEV_SUSPEND);
3952 return 0;
3953}
3954EXPORT_SYMBOL(hci_suspend_dev);
3955
3956/* Resume HCI device */
3957int hci_resume_dev(struct hci_dev *hdev)
3958{
3959 hci_notify(hdev, HCI_DEV_RESUME);
3960 return 0;
3961}
3962EXPORT_SYMBOL(hci_resume_dev);
3963
Marcel Holtmann76bca882009-11-18 00:40:39 +01003964/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003965int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003966{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003967 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003968 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003969 kfree_skb(skb);
3970 return -ENXIO;
3971 }
3972
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003973 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003974 bt_cb(skb)->incoming = 1;
3975
3976 /* Time stamp */
3977 __net_timestamp(skb);
3978
Marcel Holtmann76bca882009-11-18 00:40:39 +01003979 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003980 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003981
Marcel Holtmann76bca882009-11-18 00:40:39 +01003982 return 0;
3983}
3984EXPORT_SYMBOL(hci_recv_frame);
3985
/* Incrementally reassemble one HCI packet of the given type from a
 * driver-provided byte stream into hdev->reassembly[index].
 *
 * Once exactly the header has been collected, the expected payload
 * length is read from it; when the packet is complete it is handed to
 * hci_recv_frame().
 *
 * Returns a negative error, or the number of input bytes NOT consumed
 * (bytes that belong to the next packet in the stream).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Only ACL, SCO and event packets, and only valid slots */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate a buffer big enough for
		 * the largest packet of this type and expect the header
		 * first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* When the header is now complete, learn the payload
		 * length from it and make sure it fits the buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4093
Marcel Holtmannef222012007-07-11 06:42:04 +02004094int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4095{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304096 int rem = 0;
4097
Marcel Holtmannef222012007-07-11 06:42:04 +02004098 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4099 return -EILSEQ;
4100
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004101 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004102 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304103 if (rem < 0)
4104 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004105
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304106 data += (count - rem);
4107 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004108 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004109
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304110 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004111}
4112EXPORT_SYMBOL(hci_recv_fragment);
4113
Suraj Sumangala99811512010-07-14 13:02:19 +05304114#define STREAM_REASSEMBLY 0
4115
4116int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4117{
4118 int type;
4119 int rem = 0;
4120
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004121 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304122 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4123
4124 if (!skb) {
4125 struct { char type; } *pkt;
4126
4127 /* Start of the frame */
4128 pkt = data;
4129 type = pkt->type;
4130
4131 data++;
4132 count--;
4133 } else
4134 type = bt_cb(skb)->pkt_type;
4135
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004136 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004137 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304138 if (rem < 0)
4139 return rem;
4140
4141 data += (count - rem);
4142 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004143 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304144
4145 return rem;
4146}
4147EXPORT_SYMBOL(hci_recv_stream_fragment);
4148
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149/* ---- Interface to upper protocols ---- */
4150
/* Register an upper-protocol callback block with the HCI core.
 * The block is linked at the head of the global callback list under
 * hci_cb_list_lock.  Always succeeds.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4162
/* Remove a previously registered callback block from the global list.
 * Always succeeds; the caller must not unregister a block twice.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4174
Marcel Holtmann51086992013-10-10 14:54:19 -07004175static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004177 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004179 /* Time stamp */
4180 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004182 /* Send copy to monitor */
4183 hci_send_to_monitor(hdev, skb);
4184
4185 if (atomic_read(&hdev->promisc)) {
4186 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004187 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 }
4189
4190 /* Get rid of skb owner, prior to sending to the driver. */
4191 skb_orphan(skb);
4192
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004193 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004194 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195}
4196
Johan Hedberg3119ae92013-03-05 20:37:44 +02004197void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4198{
4199 skb_queue_head_init(&req->cmd_q);
4200 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004201 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004202}
4203
/* Submit a fully built request: attach the completion callback to the
 * last queued command, splice all commands onto the device command queue
 * and kick the command worker.
 *
 * Returns 0 on success, the recorded build error, or -ENODATA for an
 * empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Only the last command of the request carries the callback */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Move the whole request atomically onto the device queue */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4235
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004236static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004237 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238{
4239 int len = HCI_COMMAND_HDR_SIZE + plen;
4240 struct hci_command_hdr *hdr;
4241 struct sk_buff *skb;
4242
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004244 if (!skb)
4245 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246
4247 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004248 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249 hdr->plen = plen;
4250
4251 if (plen)
4252 memcpy(skb_put(skb, plen), param, plen);
4253
4254 BT_DBG("skb len %d", skb->len);
4255
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004256 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004257
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004258 return skb;
4259}
4260
4261/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004262int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4263 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004264{
4265 struct sk_buff *skb;
4266
4267 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4268
4269 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4270 if (!skb) {
4271 BT_ERR("%s no memory for command", hdev->name);
4272 return -ENOMEM;
4273 }
4274
Johan Hedberg11714b32013-03-05 20:37:47 +02004275 /* Stand-alone HCI commands must be flaged as
4276 * single-command requests.
4277 */
4278 bt_cb(skb)->req.start = true;
4279
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004281 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282
4283 return 0;
4284}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285
/* Queue a command to an asynchronous HCI request.  On allocation failure
 * the error is recorded in req->err so hci_req_run() can reject the
 * whole request.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command queued marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	/* Event associated with this command; hci_req_add() passes 0 */
	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
4316
/* Convenience wrapper around hci_req_add_ev() with the event set to 0 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
4322
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004324void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325{
4326 struct hci_command_hdr *hdr;
4327
4328 if (!hdev->sent_cmd)
4329 return NULL;
4330
4331 hdr = (void *) hdev->sent_cmd->data;
4332
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004333 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334 return NULL;
4335
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004336 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337
4338 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4339}
4340
4341/* Send ACL data */
4342static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4343{
4344 struct hci_acl_hdr *hdr;
4345 int len = skb->len;
4346
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004347 skb_push(skb, HCI_ACL_HDR_SIZE);
4348 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004349 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004350 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4351 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352}
4353
/* Prepare one outgoing ACL packet — possibly carrying continuation
 * fragments in its frag_list — and queue it on the given TX queue.
 * Every fragment gets its own ACL header; continuations are re-flagged
 * ACL_CONT and all fragments are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; the fragments are
	 * handled individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* AMP controllers address by channel handle, BR/EDR by the
	 * connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All fragments after the first are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
4411
4412void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4413{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004414 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004415
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004416 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004417
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004418 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004419
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004420 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422
4423/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004424void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425{
4426 struct hci_dev *hdev = conn->hdev;
4427 struct hci_sco_hdr hdr;
4428
4429 BT_DBG("%s len %d", hdev->name, skb->len);
4430
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004431 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004432 hdr.dlen = skb->len;
4433
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004434 skb_push(skb, HCI_SCO_HDR_SIZE);
4435 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004436 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004438 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004439
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004441 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004443
4444/* ---- HCI TX task (outgoing data) ---- */
4445
4446/* HCI Connection scheduler */
/* HCI connection scheduler: pick the connection of the given link type
 * that has queued data and the fewest packets in flight, and compute in
 * *quote its fair share of the controller's free TX buffers (at least 1).
 * Returns NULL (and *quote = 0) when nothing is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the least unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4506
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004507static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508{
4509 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004510 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004511
Ville Tervobae1f5d92011-02-10 22:38:53 -03004512 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004514 rcu_read_lock();
4515
Linus Torvalds1da177e2005-04-16 15:20:36 -07004516 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004517 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004518 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004519 BT_ERR("%s killing stalled connection %pMR",
4520 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004521 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004522 }
4523 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004524
4525 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526}
4527
/* HCI channel scheduler: among all channels on connections of the given
 * link type, consider only the highest priority found at the head of any
 * channel queue and pick the channel whose connection has the fewest
 * packets in flight.  *quote is set to that connection's fair share of
 * the free TX buffers (at least 1).  Returns NULL when nothing is ready.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Channels below the current best priority lose */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts selection */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that matches the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4609
/* Priority aging after a scheduling round: promote the head skb of every
 * channel that sent nothing to HCI_PRIO_MAX - 1 so low-priority traffic
 * cannot be starved indefinitely; channels that did send just get their
 * per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: keep priority,
			 * reset the counter.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4659
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004660static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4661{
4662 /* Calculate count of blocks used by this packet */
4663 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4664}
4665
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004666static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667{
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004668 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669 /* ACL tx timeout must be longer than maximum
4670 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004671 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004672 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004673 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004674 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004675}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676
/* Drain queued ACL data under packet-based flow control: repeatedly pick
 * the best channel (hci_chan_sent) and transmit its highest-priority
 * skbs while TX credits (hdev->acl_cnt) and the per-connection quote
 * allow.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Tear down stalled links when credits stay exhausted too long */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One credit per packet */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything went out, age priorities of channels left behind */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4714
/* Drain queued ACL data under block-based flow control: like
 * hci_sched_acl_pkt() but credits are counted in data blocks and one
 * packet may consume several blocks.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP_LINK connections */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Give up when the packet does not fit into the
			 * remaining block credits.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4768
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004769static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004770{
4771 BT_DBG("%s", hdev->name);
4772
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004773 /* No ACL link over BR/EDR controller */
4774 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4775 return;
4776
4777 /* No AMP link over AMP controller */
4778 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004779 return;
4780
4781 switch (hdev->flow_ctl_mode) {
4782 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4783 hci_sched_acl_pkt(hdev);
4784 break;
4785
4786 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4787 hci_sched_acl_blk(hdev);
4788 break;
4789 }
4790}
4791
Linus Torvalds1da177e2005-04-16 15:20:36 -07004792/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004793static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004794{
4795 struct hci_conn *conn;
4796 struct sk_buff *skb;
4797 int quote;
4798
4799 BT_DBG("%s", hdev->name);
4800
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004801 if (!hci_conn_num(hdev, SCO_LINK))
4802 return;
4803
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4805 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4806 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004807 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004808
4809 conn->sent++;
4810 if (conn->sent == ~0)
4811 conn->sent = 0;
4812 }
4813 }
4814}
4815
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004816static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004817{
4818 struct hci_conn *conn;
4819 struct sk_buff *skb;
4820 int quote;
4821
4822 BT_DBG("%s", hdev->name);
4823
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004824 if (!hci_conn_num(hdev, ESCO_LINK))
4825 return;
4826
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004827 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4828 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004829 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4830 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004831 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004832
4833 conn->sent++;
4834 if (conn->sent == ~0)
4835 conn->sent = 0;
4836 }
4837 }
4838}
4839
/* Send queued LE data.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) borrow credits from the ACL pool; leftover credits are
 * written back to whichever pool was used.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the LE credit pool, or the ACL pool when LE has none */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return unused credits to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Rebalance channel priorities if anything was sent */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4890
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004891static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004892{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004893 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004894 struct sk_buff *skb;
4895
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004896 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004897 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898
Marcel Holtmann52de5992013-09-03 18:08:38 -07004899 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4900 /* Schedule queues and send stuff to HCI driver */
4901 hci_sched_acl(hdev);
4902 hci_sched_sco(hdev);
4903 hci_sched_esco(hdev);
4904 hci_sched_le(hdev);
4905 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004906
Linus Torvalds1da177e2005-04-16 15:20:36 -07004907 /* Send next queued raw (unknown type) packet */
4908 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004909 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004910}
4911
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004912/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004913
4914/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004915static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004916{
4917 struct hci_acl_hdr *hdr = (void *) skb->data;
4918 struct hci_conn *conn;
4919 __u16 handle, flags;
4920
4921 skb_pull(skb, HCI_ACL_HDR_SIZE);
4922
4923 handle = __le16_to_cpu(hdr->handle);
4924 flags = hci_flags(handle);
4925 handle = hci_handle(handle);
4926
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004927 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004928 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004929
4930 hdev->stat.acl_rx++;
4931
4932 hci_dev_lock(hdev);
4933 conn = hci_conn_hash_lookup_handle(hdev, handle);
4934 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004935
Linus Torvalds1da177e2005-04-16 15:20:36 -07004936 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004937 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004938
Linus Torvalds1da177e2005-04-16 15:20:36 -07004939 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004940 l2cap_recv_acldata(conn, skb, flags);
4941 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004942 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004943 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004944 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945 }
4946
4947 kfree_skb(skb);
4948}
4949
4950/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004951static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004952{
4953 struct hci_sco_hdr *hdr = (void *) skb->data;
4954 struct hci_conn *conn;
4955 __u16 handle;
4956
4957 skb_pull(skb, HCI_SCO_HDR_SIZE);
4958
4959 handle = __le16_to_cpu(hdr->handle);
4960
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004961 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962
4963 hdev->stat.sco_rx++;
4964
4965 hci_dev_lock(hdev);
4966 conn = hci_conn_hash_lookup_handle(hdev, handle);
4967 hci_dev_unlock(hdev);
4968
4969 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004970 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004971 sco_recv_scodata(conn, skb);
4972 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004973 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004974 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004975 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976 }
4977
4978 kfree_skb(skb);
4979}
4980
Johan Hedberg9238f362013-03-05 20:37:48 +02004981static bool hci_req_is_complete(struct hci_dev *hdev)
4982{
4983 struct sk_buff *skb;
4984
4985 skb = skb_peek(&hdev->cmd_q);
4986 if (!skb)
4987 return true;
4988
4989 return bt_cb(skb)->req.start;
4990}
4991
Johan Hedberg42c6b122013-03-05 20:37:49 +02004992static void hci_resend_last(struct hci_dev *hdev)
4993{
4994 struct hci_command_hdr *sent;
4995 struct sk_buff *skb;
4996 u16 opcode;
4997
4998 if (!hdev->sent_cmd)
4999 return;
5000
5001 sent = (void *) hdev->sent_cmd->data;
5002 opcode = __le16_to_cpu(sent->opcode);
5003 if (opcode == HCI_OP_RESET)
5004 return;
5005
5006 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5007 if (!skb)
5008 return;
5009
5010 skb_queue_head(&hdev->cmd_q, skb);
5011 queue_work(hdev->workqueue, &hdev->cmd_work);
5012}
5013
/* Called on command complete/status events to finish the request that
 * the completed command belongs to: invokes the request's completion
 * callback (at most once) and drops any remaining queued commands of
 * the same request when the command failed.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Head of the next request stays queued */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5079
/* RX work item: drain hdev->rx_q, mirroring each frame to the monitor
 * and (in promiscuous mode) to sockets, then demux by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In user channel mode the kernel stack does not process
		 * any frames itself.
		 */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5134
/* CMD work item: send the next queued HCI command when the controller
 * has a command credit available.  Keeps a clone of the sent command in
 * hdev->sent_cmd for completion matching and possible resend.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command before replacing it */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset no command timeout is armed */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005166
5167void hci_req_add_le_scan_disable(struct hci_request *req)
5168{
5169 struct hci_cp_le_set_scan_enable cp;
5170
5171 memset(&cp, 0, sizeof(cp));
5172 cp.enable = LE_SCAN_DISABLE;
5173 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5174}
Andre Guedesa4790db2014-02-26 20:21:47 -03005175
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005176void hci_req_add_le_passive_scan(struct hci_request *req)
5177{
5178 struct hci_cp_le_set_scan_param param_cp;
5179 struct hci_cp_le_set_scan_enable enable_cp;
5180 struct hci_dev *hdev = req->hdev;
5181 u8 own_addr_type;
5182
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005183 /* Set require_privacy to false since no SCAN_REQ are send
5184 * during passive scanning. Not using an unresolvable address
5185 * here is important so that peer devices using direct
5186 * advertising with our address will be correctly reported
5187 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005188 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005189 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005190 return;
5191
5192 memset(&param_cp, 0, sizeof(param_cp));
5193 param_cp.type = LE_SCAN_PASSIVE;
5194 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5195 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5196 param_cp.own_address_type = own_addr_type;
5197 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5198 &param_cp);
5199
5200 memset(&enable_cp, 0, sizeof(enable_cp));
5201 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005202 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005203 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5204 &enable_cp);
5205}
5206
Andre Guedesa4790db2014-02-26 20:21:47 -03005207static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5208{
5209 if (status)
5210 BT_DBG("HCI request failed to update background scanning: "
5211 "status 0x%2.2x", status);
5212}
5213
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* Nothing to do while the device is being unregistered */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there is no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}