/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
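
/* Usage sketch (illustrative, not part of the upstream file): once the
 * controller is up, the attribute can be toggled from user space. The
 * exact debugfs path depends on where the entry gets created for the
 * hdev; assuming the conventional per-device directory it would be:
 *
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	Y
 *
 * Writing 'Y' sends HCI_OP_ENABLE_DUT_MODE synchronously; writing 'N'
 * leaves Device Under Test mode by resetting the controller.
 */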

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}
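
/* Worked example (illustrative, using the well-known Serial Port base
 * UUID): 00001101-0000-1000-8000-00805f9b34fb is kept in uuid->uuid[]
 * with its bytes reversed, i.e. starting fb 34 9b 5f 80 00 ... The loop
 * above restores big-endian order so that %pUb prints the canonical
 * textual form shown here.
 */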

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
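
/* Pattern note: DEFINE_SIMPLE_ATTRIBUTE() generates the open/read/write
 * file operations for a numeric debugfs attribute from a get/set callback
 * pair plus a printf format, so the attributes in this file only need to
 * implement the locked accessors. Passing NULL for the set callback makes
 * the attribute read-only, as voice_setting_fops does above.
 */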

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open = simple_open,
	.read = sc_only_mode_read,
	.llseek = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open = identity_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open = random_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open = simple_open,
	.read = force_static_address_read,
	.write = force_static_address_write,
	.llseek = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open = white_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open = identity_resolving_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	buf = memdup_user(data, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_pend_le_conns_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open = le_auto_conn_open,
	.read = seq_read,
	.write = le_auto_conn_write,
	.llseek = seq_lseek,
	.release = single_release,
};
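
/* Usage sketch (illustrative; address, type and auto_connect values are
 * placeholders): the le_auto_conn entry accepts three commands matching
 * the parsing above. For "add" the trailing auto_connect value is
 * optional and defaults to 0:
 *
 *	# echo "add 11:22:33:44:55:66 1 2" > .../le_auto_conn
 *	# echo "del 11:22:33:44:55:66 1" > .../le_auto_conn
 *	# echo "clr" > .../le_auto_conn
 *
 * The address is written most-significant byte first and is stored
 * reversed into addr.b[], matching the bdaddr_t byte order.
 */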
985
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986/* ---- HCI requests ---- */
987
Johan Hedberg42c6b122013-03-05 20:37:49 +0200988static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200990 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991
992 if (hdev->req_status == HCI_REQ_PEND) {
993 hdev->req_result = result;
994 hdev->req_status = HCI_REQ_DONE;
995 wake_up_interruptible(&hdev->req_wait_q);
996 }
997}
998
999static void hci_req_cancel(struct hci_dev *hdev, int err)
1000{
1001 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1002
1003 if (hdev->req_status == HCI_REQ_PEND) {
1004 hdev->req_result = err;
1005 hdev->req_status = HCI_REQ_CANCELED;
1006 wake_up_interruptible(&hdev->req_wait_q);
1007 }
1008}
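
/* Synchronous requests move hdev->req_status through a small state
 * machine: a waiter sets HCI_REQ_PEND and sleeps on req_wait_q; then
 * either hci_req_sync_complete() (on command completion) or
 * hci_req_cancel() (on error) records the result, flips the state to
 * HCI_REQ_DONE or HCI_REQ_CANCELED and wakes the waiter.
 */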

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
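
/* Caller sketch (illustrative only): send one HCI command and wait for
 * its Command Complete parameters, serialized against other requests the
 * same way dut_mode_write() does it above:
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *
 * On success skb->data holds the command's return parameters and the
 * skb must be freed with kfree_skb(); on failure an ERR_PTR() value is
 * returned instead of a valid skb.
 */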

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
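
/* The request builders below all follow the hci_reset_req() shape: they
 * only queue HCI commands on the passed-in struct hci_request, and a
 * caller such as hci_req_sync() runs the batch and sleeps for the
 * result. An illustrative call (HCI_INIT_TIMEOUT assumed to be the
 * timeout used for setup requests elsewhere in the HCI core):
 *
 *	err = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 */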

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

Johan Hedberg42c6b122013-03-05 20:37:49 +02001469static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001470{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001471 struct hci_dev *hdev = req->hdev;
1472
Johan Hedberg2177bab2013-03-05 20:37:43 +02001473 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001474 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001475 else
1476 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001477
1478 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001479 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001480
Johan Hedberg42c6b122013-03-05 20:37:49 +02001481 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001482
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001483 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1484 * local supported commands HCI command.
1485 */
1486 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001487 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001488
1489 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001490 /* When SSP is available, then the host features page
1491 * should also be available as well. However some
1492 * controllers list the max_page as 0 as long as SSP
1493 * has not been enabled. To achieve proper debugging
1494 * output, force the minimum max_page to 1 at least.
1495 */
1496 hdev->max_page = 0x01;
1497
Johan Hedberg2177bab2013-03-05 20:37:43 +02001498 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1499 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001500 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1501 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001502 } else {
1503 struct hci_cp_write_eir cp;
1504
1505 memset(hdev->eir, 0, sizeof(hdev->eir));
1506 memset(&cp, 0, sizeof(cp));
1507
Johan Hedberg42c6b122013-03-05 20:37:49 +02001508 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001509 }
1510 }
1511
1512 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001513 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001514
1515 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001516 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001517
1518 if (lmp_ext_feat_capable(hdev)) {
1519 struct hci_cp_read_local_ext_features cp;
1520
1521 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001522 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1523 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001524 }
1525
1526 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1527 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001528 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1529 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001530 }
1531}
1532
Johan Hedberg42c6b122013-03-05 20:37:49 +02001533static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001534{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001535 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001536 struct hci_cp_write_def_link_policy cp;
1537 u16 link_policy = 0;
1538
1539 if (lmp_rswitch_capable(hdev))
1540 link_policy |= HCI_LP_RSWITCH;
1541 if (lmp_hold_capable(hdev))
1542 link_policy |= HCI_LP_HOLD;
1543 if (lmp_sniff_capable(hdev))
1544 link_policy |= HCI_LP_SNIFF;
1545 if (lmp_park_capable(hdev))
1546 link_policy |= HCI_LP_PARK;
1547
1548 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001549 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001550}
1551
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001553{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001554 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001555 struct hci_cp_write_le_host_supported cp;
1556
Johan Hedbergc73eee92013-04-19 18:35:21 +03001557 /* LE-only devices do not support explicit enablement */
1558 if (!lmp_bredr_capable(hdev))
1559 return;
1560
Johan Hedberg2177bab2013-03-05 20:37:43 +02001561 memset(&cp, 0, sizeof(cp));
1562
1563 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1564 cp.le = 0x01;
1565 cp.simul = lmp_le_br_capable(hdev);
1566 }
1567
1568 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001569 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1570 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001571}
1572
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001573static void hci_set_event_mask_page_2(struct hci_request *req)
1574{
1575 struct hci_dev *hdev = req->hdev;
1576 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1577
1578 /* If Connectionless Slave Broadcast master role is supported
1579 * enable all necessary events for it.
1580 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001581 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001582 events[1] |= 0x40; /* Triggered Clock Capture */
1583 events[1] |= 0x80; /* Synchronization Train Complete */
1584 events[2] |= 0x10; /* Slave Page Response Timeout */
1585 events[2] |= 0x20; /* CSB Channel Map Change */
1586 }
1587
1588 /* If Connectionless Slave Broadcast slave role is supported
1589 * enable all necessary events for it.
1590 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001591 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001592 events[2] |= 0x01; /* Synchronization Train Received */
1593 events[2] |= 0x02; /* CSB Receive */
1594 events[2] |= 0x04; /* CSB Timeout */
1595 events[2] |= 0x08; /* Truncated Page Complete */
1596 }
1597
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001598 /* Enable Authenticated Payload Timeout Expired event if supported */
1599 if (lmp_ping_capable(hdev))
1600 events[2] |= 0x80;
1601
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001602 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1603}
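/* A sketch of how the 8-byte mask above is laid out, assuming the
 * usual HCI event-mask bit numbering (mask bit n lives in byte n / 8,
 * bit n % 8). For instance, "events[2] |= 0x80" sets mask bit 23,
 * which page 2 of the event mask assigns to Authenticated Payload
 * Timeout Expired. A hypothetical helper would be:
 *
 *	static inline void set_event_bit(u8 events[8], unsigned int n)
 *	{
 *		events[n / 8] |= 1 << (n % 8);
 *	}
 */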
1604
Johan Hedberg42c6b122013-03-05 20:37:49 +02001605static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001606{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001607 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001608 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001609
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001610 /* Some Broadcom based Bluetooth controllers do not support the
1611 * Delete Stored Link Key command. They are clearly indicating its
1612 * absence in the bit mask of supported commands.
1613 *
 1614	 * Check the supported commands and send it only if the command is
 1615	 * marked as supported. If it is not supported, assume that the
 1616	 * controller does not have actual support for stored link keys,
 1617	 * which makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001618 *
 1619	 * Some controllers indicate that they support deleting stored
 1620	 * link keys, but in reality they don't. The quirk lets a driver
 1621	 * simply disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001622 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001623 if (hdev->commands[6] & 0x80 &&
1624 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001625 struct hci_cp_delete_stored_link_key cp;
1626
1627 bacpy(&cp.bdaddr, BDADDR_ANY);
1628 cp.delete_all = 0x01;
1629 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1630 sizeof(cp), &cp);
1631 }
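	/* A sketch of the supported-commands test used above, assuming
	 * the usual layout of the Read Local Supported Commands bitmask:
	 * command bit n is kept in commands[n / 8] & (1 << (n % 8)), so
	 * Delete Stored Link Key (octet 6, bit 7) is checked with
	 * hdev->commands[6] & 0x80.
	 */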
1632
Johan Hedberg2177bab2013-03-05 20:37:43 +02001633 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001634 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001635
Johan Hedberg7bf32042014-02-23 19:42:29 +02001636 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001637 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001638
1639 /* Read features beyond page 1 if available */
1640 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1641 struct hci_cp_read_local_ext_features cp;
1642
1643 cp.page = p;
1644 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1645 sizeof(cp), &cp);
1646 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001647}
1648
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001649static void hci_init4_req(struct hci_request *req, unsigned long opt)
1650{
1651 struct hci_dev *hdev = req->hdev;
1652
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001653 /* Set event mask page 2 if the HCI command for it is supported */
1654 if (hdev->commands[22] & 0x04)
1655 hci_set_event_mask_page_2(req);
1656
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001657 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001658 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001659 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001660
1661 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001662 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001663 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001664 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1665 u8 support = 0x01;
1666 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1667 sizeof(support), &support);
1668 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001669}
1670
Johan Hedberg2177bab2013-03-05 20:37:43 +02001671static int __hci_init(struct hci_dev *hdev)
1672{
1673 int err;
1674
1675 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1676 if (err < 0)
1677 return err;
1678
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001679 /* The Device Under Test (DUT) mode is special and available for
1680 * all controller types. So just create it early on.
1681 */
1682 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1683 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1684 &dut_mode_fops);
1685 }
1686
Johan Hedberg2177bab2013-03-05 20:37:43 +02001687	/* The HCI_BREDR type covers single-mode LE, single-mode
 1688	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
 1689	 * only need the first stage init.
1690 */
1691 if (hdev->dev_type != HCI_BREDR)
1692 return 0;
1693
1694 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1695 if (err < 0)
1696 return err;
1697
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001698 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1699 if (err < 0)
1700 return err;
1701
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001702 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1703 if (err < 0)
1704 return err;
1705
1706 /* Only create debugfs entries during the initial setup
1707 * phase and not every time the controller gets powered on.
1708 */
1709 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1710 return 0;
1711
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001712 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1713 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001714 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1715 &hdev->manufacturer);
1716 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1717 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001718 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1719 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001720 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1721
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001722 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1723 &conn_info_min_age_fops);
1724 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1725 &conn_info_max_age_fops);
1726
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001727 if (lmp_bredr_capable(hdev)) {
1728 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1729 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001730 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1731 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001732 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1733 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001734 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1735 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001736 }
1737
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001738 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001739 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1740 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001741 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1742 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001743 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1744 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001745 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001746
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001747 if (lmp_sniff_capable(hdev)) {
1748 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1749 hdev, &idle_timeout_fops);
1750 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1751 hdev, &sniff_min_interval_fops);
1752 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1753 hdev, &sniff_max_interval_fops);
1754 }
1755
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001756 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001757 debugfs_create_file("identity", 0400, hdev->debugfs,
1758 hdev, &identity_fops);
1759 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1760 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001761 debugfs_create_file("random_address", 0444, hdev->debugfs,
1762 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001763 debugfs_create_file("static_address", 0444, hdev->debugfs,
1764 hdev, &static_address_fops);
1765
1766 /* For controllers with a public address, provide a debug
1767 * option to force the usage of the configured static
1768 * address. By default the public address is used.
1769 */
1770 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1771 debugfs_create_file("force_static_address", 0644,
1772 hdev->debugfs, hdev,
1773 &force_static_address_fops);
1774
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001775 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1776 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001777 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1778 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001779 debugfs_create_file("identity_resolving_keys", 0400,
1780 hdev->debugfs, hdev,
1781 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001782 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1783 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001784 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1785 hdev, &conn_min_interval_fops);
1786 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1787 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001788 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1789 hdev, &adv_channel_map_fops);
Andre Guedes7d474e02014-02-26 20:21:54 -03001790 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1791 &le_auto_conn_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001792 debugfs_create_u16("discov_interleaved_timeout", 0644,
1793 hdev->debugfs,
1794 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001795 }
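	/* The files created above land under debugfs, conventionally
	 * mounted at /sys/kernel/debug; e.g. (hypothetical shell
	 * session, assuming a first controller named hci0):
	 *
	 *	cat /sys/kernel/debug/bluetooth/hci0/features
	 */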
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001796
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001797 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001798}
1799
Johan Hedberg42c6b122013-03-05 20:37:49 +02001800static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801{
1802 __u8 scan = opt;
1803
Johan Hedberg42c6b122013-03-05 20:37:49 +02001804 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805
1806 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001807 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808}
1809
Johan Hedberg42c6b122013-03-05 20:37:49 +02001810static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811{
1812 __u8 auth = opt;
1813
Johan Hedberg42c6b122013-03-05 20:37:49 +02001814 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815
1816 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001817 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818}
1819
Johan Hedberg42c6b122013-03-05 20:37:49 +02001820static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821{
1822 __u8 encrypt = opt;
1823
Johan Hedberg42c6b122013-03-05 20:37:49 +02001824 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001826 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001827 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828}
1829
Johan Hedberg42c6b122013-03-05 20:37:49 +02001830static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001831{
1832 __le16 policy = cpu_to_le16(opt);
1833
Johan Hedberg42c6b122013-03-05 20:37:49 +02001834 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001835
1836 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001837 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001838}
1839
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001840/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 * Device is held on return. */
1842struct hci_dev *hci_dev_get(int index)
1843{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001844 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845
1846 BT_DBG("%d", index);
1847
1848 if (index < 0)
1849 return NULL;
1850
1851 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001852 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 if (d->id == index) {
1854 hdev = hci_dev_hold(d);
1855 break;
1856 }
1857 }
1858 read_unlock(&hci_dev_list_lock);
1859 return hdev;
1860}
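/* A minimal usage sketch for the reference counting above; the helper
 * below is hypothetical, but the pattern is the point: every
 * successful hci_dev_get() must be balanced by hci_dev_put().
 */
static inline int example_read_dev_flags(int index, unsigned long *flags)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	*flags = hdev->flags;
	hci_dev_put(hdev);	/* drop the reference taken by hci_dev_get() */

	return 0;
}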
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861
1862/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001863
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001864bool hci_discovery_active(struct hci_dev *hdev)
1865{
1866 struct discovery_state *discov = &hdev->discovery;
1867
Andre Guedes6fbe1952012-02-03 17:47:58 -03001868 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001869 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001870 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001871 return true;
1872
Andre Guedes6fbe1952012-02-03 17:47:58 -03001873 default:
1874 return false;
1875 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001876}
1877
Johan Hedbergff9ef572012-01-04 14:23:45 +02001878void hci_discovery_set_state(struct hci_dev *hdev, int state)
1879{
1880 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1881
1882 if (hdev->discovery.state == state)
1883 return;
1884
1885 switch (state) {
1886 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001887 hci_update_background_scan(hdev);
1888
Andre Guedes7b99b652012-02-13 15:41:02 -03001889 if (hdev->discovery.state != DISCOVERY_STARTING)
1890 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001891 break;
1892 case DISCOVERY_STARTING:
1893 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001894 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001895 mgmt_discovering(hdev, 1);
1896 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001897 case DISCOVERY_RESOLVING:
1898 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001899 case DISCOVERY_STOPPING:
1900 break;
1901 }
1902
1903 hdev->discovery.state = state;
1904}
1905
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001906void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907{
Johan Hedberg30883512012-01-04 14:16:21 +02001908 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001909 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910
Johan Hedberg561aafb2012-01-04 13:31:59 +02001911 list_for_each_entry_safe(p, n, &cache->all, all) {
1912 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001913 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001915
1916 INIT_LIST_HEAD(&cache->unknown);
1917 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918}
1919
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001920struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1921 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922{
Johan Hedberg30883512012-01-04 14:16:21 +02001923 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 struct inquiry_entry *e;
1925
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001926 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
Johan Hedberg561aafb2012-01-04 13:31:59 +02001928 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001930 return e;
1931 }
1932
1933 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934}
1935
Johan Hedberg561aafb2012-01-04 13:31:59 +02001936struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001937 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001938{
Johan Hedberg30883512012-01-04 14:16:21 +02001939 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001940 struct inquiry_entry *e;
1941
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001942 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001943
1944 list_for_each_entry(e, &cache->unknown, list) {
1945 if (!bacmp(&e->data.bdaddr, bdaddr))
1946 return e;
1947 }
1948
1949 return NULL;
1950}
1951
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001952struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001953 bdaddr_t *bdaddr,
1954 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001955{
1956 struct discovery_state *cache = &hdev->discovery;
1957 struct inquiry_entry *e;
1958
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001959 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001960
1961 list_for_each_entry(e, &cache->resolve, list) {
1962 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1963 return e;
1964 if (!bacmp(&e->data.bdaddr, bdaddr))
1965 return e;
1966 }
1967
1968 return NULL;
1969}
1970
Johan Hedberga3d4e202012-01-09 00:53:02 +02001971void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001972 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001973{
1974 struct discovery_state *cache = &hdev->discovery;
1975 struct list_head *pos = &cache->resolve;
1976 struct inquiry_entry *p;
1977
1978 list_del(&ie->list);
1979
1980 list_for_each_entry(p, &cache->resolve, list) {
1981 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001982 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001983 break;
1984 pos = &p->list;
1985 }
1986
1987 list_add(&ie->list, pos);
1988}
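/* A worked example of the ordering maintained above, assuming the
 * usual negative dBm RSSI values: the resolve list is kept strongest
 * signal first, so an entry with rssi -50 (abs 50) is re-inserted
 * after one with -40 (abs 40) and before one with -70 (abs 70).
 * NAME_PENDING entries never force an earlier insertion point.
 */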
1989
Johan Hedberg31754052012-01-04 13:39:52 +02001990bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001991 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992{
Johan Hedberg30883512012-01-04 14:16:21 +02001993 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001994 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001996 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
Szymon Janc2b2fec42012-11-20 11:38:54 +01001998 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1999
Johan Hedberg01735bb2014-03-25 12:06:18 +02002000 *ssp = data->ssp_mode;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002001
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002002 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002003 if (ie) {
Johan Hedberg8002d772014-03-27 13:51:24 +02002004 if (ie->data.ssp_mode)
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002005 *ssp = true;
2006
Johan Hedberga3d4e202012-01-09 00:53:02 +02002007 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002008 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002009 ie->data.rssi = data->rssi;
2010 hci_inquiry_cache_update_resolve(hdev, ie);
2011 }
2012
Johan Hedberg561aafb2012-01-04 13:31:59 +02002013 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002014 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002015
Johan Hedberg561aafb2012-01-04 13:31:59 +02002016 /* Entry not in the cache. Add new one. */
2017 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2018 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02002019 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002020
2021 list_add(&ie->all, &cache->all);
2022
2023 if (name_known) {
2024 ie->name_state = NAME_KNOWN;
2025 } else {
2026 ie->name_state = NAME_NOT_KNOWN;
2027 list_add(&ie->list, &cache->unknown);
2028 }
2029
2030update:
2031 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002032 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002033 ie->name_state = NAME_KNOWN;
2034 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 }
2036
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002037 memcpy(&ie->data, data, sizeof(*data));
2038 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002040
2041 if (ie->name_state == NAME_NOT_KNOWN)
2042 return false;
2043
2044 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045}
2046
2047static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2048{
Johan Hedberg30883512012-01-04 14:16:21 +02002049 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 struct inquiry_info *info = (struct inquiry_info *) buf;
2051 struct inquiry_entry *e;
2052 int copied = 0;
2053
Johan Hedberg561aafb2012-01-04 13:31:59 +02002054 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002056
2057 if (copied >= num)
2058 break;
2059
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 bacpy(&info->bdaddr, &data->bdaddr);
2061 info->pscan_rep_mode = data->pscan_rep_mode;
2062 info->pscan_period_mode = data->pscan_period_mode;
2063 info->pscan_mode = data->pscan_mode;
2064 memcpy(info->dev_class, data->dev_class, 3);
2065 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002066
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002068 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 }
2070
2071 BT_DBG("cache %p, copied %d", cache, copied);
2072 return copied;
2073}
2074
Johan Hedberg42c6b122013-03-05 20:37:49 +02002075static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076{
2077 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002078 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 struct hci_cp_inquiry cp;
2080
2081 BT_DBG("%s", hdev->name);
2082
2083 if (test_bit(HCI_INQUIRY, &hdev->flags))
2084 return;
2085
2086 /* Start Inquiry */
2087 memcpy(&cp.lap, &ir->lap, 3);
2088 cp.length = ir->length;
2089 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002090 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091}
2092
Andre Guedes3e13fa12013-03-27 20:04:56 -03002093static int wait_inquiry(void *word)
2094{
2095 schedule();
2096 return signal_pending(current);
2097}
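/* A sketch of the contract assumed by the action function above: with
 * this wait_on_bit() variant the action is invoked while the bit is
 * still set; returning 0 keeps waiting, and a non-zero return (here, a
 * pending signal) aborts the wait so the caller can return -EINTR.
 */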
2098
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099int hci_inquiry(void __user *arg)
2100{
2101 __u8 __user *ptr = arg;
2102 struct hci_inquiry_req ir;
2103 struct hci_dev *hdev;
2104 int err = 0, do_inquiry = 0, max_rsp;
2105 long timeo;
2106 __u8 *buf;
2107
2108 if (copy_from_user(&ir, ptr, sizeof(ir)))
2109 return -EFAULT;
2110
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002111 hdev = hci_dev_get(ir.dev_id);
2112 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 return -ENODEV;
2114
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002115 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2116 err = -EBUSY;
2117 goto done;
2118 }
2119
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002120 if (hdev->dev_type != HCI_BREDR) {
2121 err = -EOPNOTSUPP;
2122 goto done;
2123 }
2124
Johan Hedberg56f87902013-10-02 13:43:13 +03002125 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2126 err = -EOPNOTSUPP;
2127 goto done;
2128 }
2129
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002130 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002131 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002132 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002133 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 do_inquiry = 1;
2135 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002136 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137
Marcel Holtmann04837f62006-07-03 10:02:33 +02002138 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002139
2140 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002141 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2142 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002143 if (err < 0)
2144 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002145
2146 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2147 * cleared). If it is interrupted by a signal, return -EINTR.
2148 */
2149 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2150 TASK_INTERRUPTIBLE))
2151 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002154	/* For an unlimited number of responses, use a buffer with
 2155	 * 255 entries.
2156 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2158
 2159	/* inquiry_cache_dump() can't sleep, so allocate a temporary
 2160	 * buffer and then copy it to user space.
2161 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002162 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002163 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 err = -ENOMEM;
2165 goto done;
2166 }
2167
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002168 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002170 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
2172 BT_DBG("num_rsp %d", ir.num_rsp);
2173
2174 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2175 ptr += sizeof(ir);
2176 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002177 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002179 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 err = -EFAULT;
2181
2182 kfree(buf);
2183
2184done:
2185 hci_dev_put(hdev);
2186 return err;
2187}
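/* A minimal user-space sketch of driving the ioctl above (illustrative
 * only; fd, buf sizing and error handling are the hypothetical
 * caller's responsibility):
 *
 *	struct hci_inquiry_req *ir = buf;  // buf = ir + 255 inquiry_info
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;                // GIAC 0x9e8b33, little endian
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;                   // inquiry length units
 *	ir->num_rsp = 0;                   // 0 requests up to 255 responses
 *	err = ioctl(fd, HCIINQUIRY, (unsigned long) buf);
 */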
2188
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002189static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 int ret = 0;
2192
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 BT_DBG("%s %p", hdev->name, hdev);
2194
2195 hci_req_lock(hdev);
2196
Johan Hovold94324962012-03-15 14:48:41 +01002197 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2198 ret = -ENODEV;
2199 goto done;
2200 }
2201
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002202 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2203 /* Check for rfkill but allow the HCI setup stage to
2204 * proceed (which in itself doesn't cause any RF activity).
2205 */
2206 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2207 ret = -ERFKILL;
2208 goto done;
2209 }
2210
2211 /* Check for valid public address or a configured static
 2212	 * random address, but let the HCI setup proceed to
2213 * be able to determine if there is a public address
2214 * or not.
2215 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002216 * In case of user channel usage, it is not important
2217 * if a public address or static random address is
2218 * available.
2219 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002220 * This check is only valid for BR/EDR controllers
2221 * since AMP controllers do not have an address.
2222 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002223 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2224 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002225 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2226 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2227 ret = -EADDRNOTAVAIL;
2228 goto done;
2229 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002230 }
2231
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 if (test_bit(HCI_UP, &hdev->flags)) {
2233 ret = -EALREADY;
2234 goto done;
2235 }
2236
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 if (hdev->open(hdev)) {
2238 ret = -EIO;
2239 goto done;
2240 }
2241
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002242 atomic_set(&hdev->cmd_cnt, 1);
2243 set_bit(HCI_INIT, &hdev->flags);
2244
2245 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2246 ret = hdev->setup(hdev);
2247
2248 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002249 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2250 set_bit(HCI_RAW, &hdev->flags);
2251
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002252 if (!test_bit(HCI_RAW, &hdev->flags) &&
2253 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002254 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 }
2256
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002257 clear_bit(HCI_INIT, &hdev->flags);
2258
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 if (!ret) {
2260 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002261 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 set_bit(HCI_UP, &hdev->flags);
2263 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002264 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002265 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002266 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002267 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002268 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002269 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002270 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002271 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002273 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002274 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002275 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
2277 skb_queue_purge(&hdev->cmd_q);
2278 skb_queue_purge(&hdev->rx_q);
2279
2280 if (hdev->flush)
2281 hdev->flush(hdev);
2282
2283 if (hdev->sent_cmd) {
2284 kfree_skb(hdev->sent_cmd);
2285 hdev->sent_cmd = NULL;
2286 }
2287
2288 hdev->close(hdev);
2289 hdev->flags = 0;
2290 }
2291
2292done:
2293 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 return ret;
2295}
2296
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002297/* ---- HCI ioctl helpers ---- */
2298
2299int hci_dev_open(__u16 dev)
2300{
2301 struct hci_dev *hdev;
2302 int err;
2303
2304 hdev = hci_dev_get(dev);
2305 if (!hdev)
2306 return -ENODEV;
2307
Johan Hedberge1d08f42013-10-01 22:44:50 +03002308 /* We need to ensure that no other power on/off work is pending
2309 * before proceeding to call hci_dev_do_open. This is
2310 * particularly important if the setup procedure has not yet
2311 * completed.
2312 */
2313 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2314 cancel_delayed_work(&hdev->power_off);
2315
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002316 /* After this call it is guaranteed that the setup procedure
2317 * has finished. This means that error conditions like RFKILL
2318 * or no valid public or static random address apply.
2319 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002320 flush_workqueue(hdev->req_workqueue);
2321
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002322 err = hci_dev_do_open(hdev);
2323
2324 hci_dev_put(hdev);
2325
2326 return err;
2327}
2328
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329static int hci_dev_do_close(struct hci_dev *hdev)
2330{
2331 BT_DBG("%s %p", hdev->name, hdev);
2332
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002333 cancel_delayed_work(&hdev->power_off);
2334
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 hci_req_cancel(hdev, ENODEV);
2336 hci_req_lock(hdev);
2337
2338 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002339 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340 hci_req_unlock(hdev);
2341 return 0;
2342 }
2343
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002344 /* Flush RX and TX works */
2345 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002346 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002348 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002349 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002350 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002351 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002352 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002353 }
2354
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002355 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002356 cancel_delayed_work(&hdev->service_cache);
2357
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002358 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002359
2360 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2361 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002362
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002363 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002364 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002366 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002367 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
2369 hci_notify(hdev, HCI_DEV_DOWN);
2370
2371 if (hdev->flush)
2372 hdev->flush(hdev);
2373
2374 /* Reset device */
2375 skb_queue_purge(&hdev->cmd_q);
2376 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002377 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002378 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002379 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002381 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 clear_bit(HCI_INIT, &hdev->flags);
2383 }
2384
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002385 /* flush cmd work */
2386 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387
2388 /* Drop queues */
2389 skb_queue_purge(&hdev->rx_q);
2390 skb_queue_purge(&hdev->cmd_q);
2391 skb_queue_purge(&hdev->raw_q);
2392
2393 /* Drop last sent command */
2394 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002395 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 kfree_skb(hdev->sent_cmd);
2397 hdev->sent_cmd = NULL;
2398 }
2399
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002400 kfree_skb(hdev->recv_evt);
2401 hdev->recv_evt = NULL;
2402
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 /* After this point our queues are empty
2404 * and no tasks are scheduled. */
2405 hdev->close(hdev);
2406
Johan Hedberg35b973c2013-03-15 17:06:59 -05002407 /* Clear flags */
2408 hdev->flags = 0;
2409 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2410
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002411 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2412 if (hdev->dev_type == HCI_BREDR) {
2413 hci_dev_lock(hdev);
2414 mgmt_powered(hdev, 0);
2415 hci_dev_unlock(hdev);
2416 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002417 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002418
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002419 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002420 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002421
Johan Hedberge59fda82012-02-22 18:11:53 +02002422 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002423 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002424 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002425
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426 hci_req_unlock(hdev);
2427
2428 hci_dev_put(hdev);
2429 return 0;
2430}
2431
2432int hci_dev_close(__u16 dev)
2433{
2434 struct hci_dev *hdev;
2435 int err;
2436
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002437 hdev = hci_dev_get(dev);
2438 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002440
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002441 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2442 err = -EBUSY;
2443 goto done;
2444 }
2445
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002446 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2447 cancel_delayed_work(&hdev->power_off);
2448
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002450
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002451done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 hci_dev_put(hdev);
2453 return err;
2454}
2455
2456int hci_dev_reset(__u16 dev)
2457{
2458 struct hci_dev *hdev;
2459 int ret = 0;
2460
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002461 hdev = hci_dev_get(dev);
2462 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 return -ENODEV;
2464
2465 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
Marcel Holtmann808a0492013-08-26 20:57:58 -07002467 if (!test_bit(HCI_UP, &hdev->flags)) {
2468 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002470 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002472 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2473 ret = -EBUSY;
2474 goto done;
2475 }
2476
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 /* Drop queues */
2478 skb_queue_purge(&hdev->rx_q);
2479 skb_queue_purge(&hdev->cmd_q);
2480
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002481 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002482 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002484 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485
2486 if (hdev->flush)
2487 hdev->flush(hdev);
2488
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002489 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002490 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491
2492 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002493 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494
2495done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 hci_req_unlock(hdev);
2497 hci_dev_put(hdev);
2498 return ret;
2499}
2500
2501int hci_dev_reset_stat(__u16 dev)
2502{
2503 struct hci_dev *hdev;
2504 int ret = 0;
2505
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002506 hdev = hci_dev_get(dev);
2507 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 return -ENODEV;
2509
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002510 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2511 ret = -EBUSY;
2512 goto done;
2513 }
2514
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2516
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002517done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519 return ret;
2520}
2521
2522int hci_dev_cmd(unsigned int cmd, void __user *arg)
2523{
2524 struct hci_dev *hdev;
2525 struct hci_dev_req dr;
2526 int err = 0;
2527
2528 if (copy_from_user(&dr, arg, sizeof(dr)))
2529 return -EFAULT;
2530
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002531 hdev = hci_dev_get(dr.dev_id);
2532 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 return -ENODEV;
2534
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002535 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2536 err = -EBUSY;
2537 goto done;
2538 }
2539
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002540 if (hdev->dev_type != HCI_BREDR) {
2541 err = -EOPNOTSUPP;
2542 goto done;
2543 }
2544
Johan Hedberg56f87902013-10-02 13:43:13 +03002545 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2546 err = -EOPNOTSUPP;
2547 goto done;
2548 }
2549
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 switch (cmd) {
2551 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002552 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2553 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 break;
2555
2556 case HCISETENCRYPT:
2557 if (!lmp_encrypt_capable(hdev)) {
2558 err = -EOPNOTSUPP;
2559 break;
2560 }
2561
2562 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2563 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002564 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2565 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 if (err)
2567 break;
2568 }
2569
Johan Hedberg01178cd2013-03-05 20:37:41 +02002570 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2571 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572 break;
2573
2574 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002575 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2576 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 break;
2578
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002579 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002580 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2581 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002582 break;
2583
2584 case HCISETLINKMODE:
2585 hdev->link_mode = ((__u16) dr.dev_opt) &
2586 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2587 break;
2588
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 case HCISETPTYPE:
2590 hdev->pkt_type = (__u16) dr.dev_opt;
2591 break;
2592
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002594 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2595 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 break;
2597
2598 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002599 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2600 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 break;
2602
2603 default:
2604 err = -EINVAL;
2605 break;
2606 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002607
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002608done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 hci_dev_put(hdev);
2610 return err;
2611}
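/* A worked example of the HCISETACLMTU/HCISETSCOMTU decoding above on
 * a little-endian host: dev_opt carries the packet count in its low
 * 16 bits and the MTU in its high 16 bits, so a hypothetical caller
 * would encode
 *
 *	dr.dev_opt = ((__u32) mtu << 16) | pkts;
 *
 * and dev_opt == 0x02800008 yields an MTU of 640 and 8 packets.
 */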
2612
2613int hci_get_dev_list(void __user *arg)
2614{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002615 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 struct hci_dev_list_req *dl;
2617 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618 int n = 0, size, err;
2619 __u16 dev_num;
2620
2621 if (get_user(dev_num, (__u16 __user *) arg))
2622 return -EFAULT;
2623
2624 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2625 return -EINVAL;
2626
2627 size = sizeof(*dl) + dev_num * sizeof(*dr);
2628
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002629 dl = kzalloc(size, GFP_KERNEL);
2630 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631 return -ENOMEM;
2632
2633 dr = dl->dev_req;
2634
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002635 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002636 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002637 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002638 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002639
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002640 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2641 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002642
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 (dr + n)->dev_id = hdev->id;
2644 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002645
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 if (++n >= dev_num)
2647 break;
2648 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002649 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650
2651 dl->dev_num = n;
2652 size = sizeof(*dl) + n * sizeof(*dr);
2653
2654 err = copy_to_user(arg, dl, size);
2655 kfree(dl);
2656
2657 return err ? -EFAULT : 0;
2658}
2659
2660int hci_get_dev_info(void __user *arg)
2661{
2662 struct hci_dev *hdev;
2663 struct hci_dev_info di;
2664 int err = 0;
2665
2666 if (copy_from_user(&di, arg, sizeof(di)))
2667 return -EFAULT;
2668
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002669 hdev = hci_dev_get(di.dev_id);
2670 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 return -ENODEV;
2672
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002673 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002674 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002675
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002676 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2677 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002678
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 strcpy(di.name, hdev->name);
2680 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002681 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 di.flags = hdev->flags;
2683 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002684 if (lmp_bredr_capable(hdev)) {
2685 di.acl_mtu = hdev->acl_mtu;
2686 di.acl_pkts = hdev->acl_pkts;
2687 di.sco_mtu = hdev->sco_mtu;
2688 di.sco_pkts = hdev->sco_pkts;
2689 } else {
2690 di.acl_mtu = hdev->le_mtu;
2691 di.acl_pkts = hdev->le_pkts;
2692 di.sco_mtu = 0;
2693 di.sco_pkts = 0;
2694 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695 di.link_policy = hdev->link_policy;
2696 di.link_mode = hdev->link_mode;
2697
2698 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2699 memcpy(&di.features, &hdev->features, sizeof(di.features));
2700
2701 if (copy_to_user(arg, &di, sizeof(di)))
2702 err = -EFAULT;
2703
2704 hci_dev_put(hdev);
2705
2706 return err;
2707}
2708
2709/* ---- Interface to HCI drivers ---- */
2710
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002711static int hci_rfkill_set_block(void *data, bool blocked)
2712{
2713 struct hci_dev *hdev = data;
2714
2715 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2716
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002717 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2718 return -EBUSY;
2719
Johan Hedberg5e130362013-09-13 08:58:17 +03002720 if (blocked) {
2721 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002722 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2723 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002724 } else {
2725 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002726 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002727
2728 return 0;
2729}
2730
2731static const struct rfkill_ops hci_rfkill_ops = {
2732 .set_block = hci_rfkill_set_block,
2733};
2734
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002735static void hci_power_on(struct work_struct *work)
2736{
2737 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002738 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002739
2740 BT_DBG("%s", hdev->name);
2741
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002742 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002743 if (err < 0) {
2744 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002745 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002746 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002747
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002748 /* During the HCI setup phase, a few error conditions are
2749 * ignored and they need to be checked now. If they are still
2750 * valid, it is important to turn the device back off.
2751 */
2752 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2753 (hdev->dev_type == HCI_BREDR &&
2754 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2755 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002756 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2757 hci_dev_do_close(hdev);
2758 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002759 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2760 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002761 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002762
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002763 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002764 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002765}
2766
2767static void hci_power_off(struct work_struct *work)
2768{
Johan Hedberg32435532011-11-07 22:16:04 +02002769 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002770 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002771
2772 BT_DBG("%s", hdev->name);
2773
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002774 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002775}
2776
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002777static void hci_discov_off(struct work_struct *work)
2778{
2779 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002780
2781 hdev = container_of(work, struct hci_dev, discov_off.work);
2782
2783 BT_DBG("%s", hdev->name);
2784
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002785 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002786}
2787
Johan Hedberg35f74982014-02-18 17:14:32 +02002788void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002789{
Johan Hedberg48210022013-01-27 00:31:28 +02002790 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002791
Johan Hedberg48210022013-01-27 00:31:28 +02002792 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2793 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002794 kfree(uuid);
2795 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002796}
2797
Johan Hedberg35f74982014-02-18 17:14:32 +02002798void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002799{
2800 struct list_head *p, *n;
2801
2802 list_for_each_safe(p, n, &hdev->link_keys) {
2803 struct link_key *key;
2804
2805 key = list_entry(p, struct link_key, list);
2806
2807 list_del(p);
2808 kfree(key);
2809 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002810}
2811
Johan Hedberg35f74982014-02-18 17:14:32 +02002812void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002813{
2814 struct smp_ltk *k, *tmp;
2815
2816 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2817 list_del(&k->list);
2818 kfree(k);
2819 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002820}
2821
Johan Hedberg970c4e42014-02-18 10:19:33 +02002822void hci_smp_irks_clear(struct hci_dev *hdev)
2823{
2824 struct smp_irk *k, *tmp;
2825
2826 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2827 list_del(&k->list);
2828 kfree(k);
2829 }
2830}
2831
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002832struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2833{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002834 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002835
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002836 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002837 if (bacmp(bdaddr, &k->bdaddr) == 0)
2838 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002839
2840 return NULL;
2841}
2842
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302843static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002844 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002845{
2846 /* Legacy key */
2847 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302848 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002849
2850 /* Debug keys are insecure so don't store them persistently */
2851 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302852 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002853
2854 /* Changed combination key and there's no previous one */
2855 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302856 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002857
2858 /* Security mode 3 case */
2859 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302860 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002861
 2862	/* Neither the local nor the remote side requested no-bonding */
2863 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302864 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002865
2866 /* Local side had dedicated bonding as requirement */
2867 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302868 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002869
2870 /* Remote side had dedicated bonding as requirement */
2871 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302872 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002873
2874 /* If none of the above criteria match, then don't store the key
2875 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302876 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002877}
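
/*
 * Worked example (editorial note, derived from the checks above): a
 * legacy combination key (type 0x00 < 0x03) is always stored; a debug
 * combination key never is; for SSP keys the authentication requirements
 * decide, where 0x02/0x03 denote dedicated bonding on either side.
 */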
2878
Johan Hedberg98a0b842014-01-30 19:40:00 -08002879static bool ltk_type_master(u8 type)
2880{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03002881 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08002882}
2883
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002884struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002885 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002886{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002887 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002888
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002889 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002890 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002891 continue;
2892
Johan Hedberg98a0b842014-01-30 19:40:00 -08002893 if (ltk_type_master(k->type) != master)
2894 continue;
2895
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002896 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002897 }
2898
2899 return NULL;
2900}
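
/*
 * Usage sketch (illustrative; "ev" stands for a hypothetical LE Long Term
 * Key Request event and the final argument picks the key direction):
 *
 *	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, false);
 *	if (ltk)
 *		... reply with ltk->val, truncated to ltk->enc_size ...
 */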
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002901
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002902struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002903 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002904{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002905 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002906
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002907 list_for_each_entry(k, &hdev->long_term_keys, list)
2908 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002909 bacmp(bdaddr, &k->bdaddr) == 0 &&
2910 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002911 return k;
2912
2913 return NULL;
2914}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002915
Johan Hedberg970c4e42014-02-18 10:19:33 +02002916struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2917{
2918 struct smp_irk *irk;
2919
2920 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2921 if (!bacmp(&irk->rpa, rpa))
2922 return irk;
2923 }
2924
2925 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2926 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2927 bacpy(&irk->rpa, rpa);
2928 return irk;
2929 }
2930 }
2931
2932 return NULL;
2933}
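
/*
 * Editorial note: the lookup above is two-pass on purpose. It first does
 * a cheap comparison against the last RPA each IRK resolved to, and only
 * then the AES-based smp_irk_matches() check, caching any hit in
 * irk->rpa for the next lookup. A minimal sketch ("rpa" would typically
 * come from an advertising report):
 *
 *	irk = hci_find_irk_by_rpa(hdev, &rpa);
 *	if (irk)
 *		... peer identity is irk->bdaddr / irk->addr_type ...
 */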
2934
2935struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2936 u8 addr_type)
2937{
2938 struct smp_irk *irk;
2939
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002940 /* Identity Address must be public or static random */
2941 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2942 return NULL;
2943
Johan Hedberg970c4e42014-02-18 10:19:33 +02002944 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2945 if (addr_type == irk->addr_type &&
2946 bacmp(bdaddr, &irk->bdaddr) == 0)
2947 return irk;
2948 }
2949
2950 return NULL;
2951}
2952
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002953struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002954 bdaddr_t *bdaddr, u8 *val, u8 type,
2955 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002956{
2957 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302958 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002959
2960 old_key = hci_find_link_key(hdev, bdaddr);
2961 if (old_key) {
2962 old_key_type = old_key->type;
2963 key = old_key;
2964 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002965 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002966 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002967 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002968 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002969 list_add(&key->list, &hdev->link_keys);
2970 }
2971
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002972 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002973
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002974 /* Some buggy controller combinations generate a changed
2975 * combination key for legacy pairing even when there's no
2976 * previous key */
2977 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002978 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002979 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002980 if (conn)
2981 conn->key_type = type;
2982 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002983
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002984 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002985 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002986 key->pin_len = pin_len;
2987
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002988 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002989 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002990 else
2991 key->type = type;
2992
Johan Hedberg7652ff62014-06-24 13:15:49 +03002993 if (persistent)
2994 *persistent = hci_persistent_key(hdev, conn, type,
2995 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002996
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002997 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002998}
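
/*
 * Usage sketch (illustrative; "ev" stands for a hypothetical Link Key
 * Notification event, not shown in this file):
 *
 *	bool persistent;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, conn->pin_length, &persistent);
 *	if (key && !persistent)
 *		... keep the key only for the lifetime of the connection ...
 */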
2999
Johan Hedbergca9142b2014-02-19 14:57:44 +02003000struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003001 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003002 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003003{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003004 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003005 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003006
Johan Hedberg98a0b842014-01-30 19:40:00 -08003007 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003008 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003009 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003010 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003011 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003012 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003013 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003014 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003015 }
3016
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003017 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003018 key->bdaddr_type = addr_type;
3019 memcpy(key->val, tk, sizeof(key->val));
3020 key->authenticated = authenticated;
3021 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003022 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003023 key->enc_size = enc_size;
3024 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003025
Johan Hedbergca9142b2014-02-19 14:57:44 +02003026 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003027}
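
/*
 * Usage sketch (illustrative; the key material names are placeholders
 * for values distributed during SMP pairing):
 *
 *	hci_add_ltk(hdev, &conn->dst, conn->dst_type, SMP_LTK,
 *		    authenticated, tk, enc_size, ediv, rand);
 */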
3028
Johan Hedbergca9142b2014-02-19 14:57:44 +02003029struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3030 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003031{
3032 struct smp_irk *irk;
3033
3034 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3035 if (!irk) {
3036 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3037 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003038 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003039
3040 bacpy(&irk->bdaddr, bdaddr);
3041 irk->addr_type = addr_type;
3042
3043 list_add(&irk->list, &hdev->identity_resolving_keys);
3044 }
3045
3046 memcpy(irk->val, val, 16);
3047 bacpy(&irk->rpa, rpa);
3048
Johan Hedbergca9142b2014-02-19 14:57:44 +02003049 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003050}
3051
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003052int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3053{
3054 struct link_key *key;
3055
3056 key = hci_find_link_key(hdev, bdaddr);
3057 if (!key)
3058 return -ENOENT;
3059
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003060 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003061
3062 list_del(&key->list);
3063 kfree(key);
3064
3065 return 0;
3066}
3067
Johan Hedberge0b2b272014-02-18 17:14:31 +02003068int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003069{
3070 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003071 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003072
3073 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003074 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003075 continue;
3076
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003077 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003078
3079 list_del(&k->list);
3080 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003081 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003082 }
3083
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003084 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003085}
3086
Johan Hedberga7ec7332014-02-18 17:14:35 +02003087void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3088{
3089 struct smp_irk *k, *tmp;
3090
Johan Hedberg668b7b12014-02-21 16:03:31 +02003091 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003092 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3093 continue;
3094
3095 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3096
3097 list_del(&k->list);
3098 kfree(k);
3099 }
3100}
3101
Ville Tervo6bd32322011-02-16 16:32:41 +02003102/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003103static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003104{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003105 struct hci_dev *hdev = container_of(work, struct hci_dev,
3106 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003107
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003108 if (hdev->sent_cmd) {
3109 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3110 u16 opcode = __le16_to_cpu(sent->opcode);
3111
3112 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3113 } else {
3114 BT_ERR("%s command tx timeout", hdev->name);
3115 }
3116
Ville Tervo6bd32322011-02-16 16:32:41 +02003117 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003118 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003119}
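
/*
 * Editorial note: the timeout above is armed by the command transmit
 * path; a sketch of that call (assuming the HCI_CMD_TIMEOUT constant
 * from hci.h) looks like:
 *
 *	schedule_delayed_work(&hdev->cmd_timer, HCI_CMD_TIMEOUT);
 */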
3120
Szymon Janc2763eda2011-03-22 13:12:22 +01003121struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003122 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003123{
3124 struct oob_data *data;
3125
3126 list_for_each_entry(data, &hdev->remote_oob_data, list)
3127 if (bacmp(bdaddr, &data->bdaddr) == 0)
3128 return data;
3129
3130 return NULL;
3131}
3132
3133int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3134{
3135 struct oob_data *data;
3136
3137 data = hci_find_remote_oob_data(hdev, bdaddr);
3138 if (!data)
3139 return -ENOENT;
3140
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003141 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003142
3143 list_del(&data->list);
3144 kfree(data);
3145
3146 return 0;
3147}
3148
Johan Hedberg35f74982014-02-18 17:14:32 +02003149void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003150{
3151 struct oob_data *data, *n;
3152
3153 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3154 list_del(&data->list);
3155 kfree(data);
3156 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003157}
3158
Marcel Holtmann07988722014-01-10 02:07:29 -08003159int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3160 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003161{
3162 struct oob_data *data;
3163
3164 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003165 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003166 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003167 if (!data)
3168 return -ENOMEM;
3169
3170 bacpy(&data->bdaddr, bdaddr);
3171 list_add(&data->list, &hdev->remote_oob_data);
3172 }
3173
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003174 memcpy(data->hash192, hash, sizeof(data->hash192));
3175 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003176
Marcel Holtmann07988722014-01-10 02:07:29 -08003177 memset(data->hash256, 0, sizeof(data->hash256));
3178 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3179
3180 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3181
3182 return 0;
3183}
3184
3185int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3186 u8 *hash192, u8 *randomizer192,
3187 u8 *hash256, u8 *randomizer256)
3188{
3189 struct oob_data *data;
3190
3191 data = hci_find_remote_oob_data(hdev, bdaddr);
3192 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003193 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003194 if (!data)
3195 return -ENOMEM;
3196
3197 bacpy(&data->bdaddr, bdaddr);
3198 list_add(&data->list, &hdev->remote_oob_data);
3199 }
3200
3201 memcpy(data->hash192, hash192, sizeof(data->hash192));
3202 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3203
3204 memcpy(data->hash256, hash256, sizeof(data->hash256));
3205 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3206
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003207 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003208
3209 return 0;
3210}
3211
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003212struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3213 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003214{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003215 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003216
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003217 list_for_each_entry(b, &hdev->blacklist, list) {
3218 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003219 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003220 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003221
3222 return NULL;
3223}
3224
Marcel Holtmannc9507492014-02-27 19:35:54 -08003225static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003226{
3227 struct list_head *p, *n;
3228
3229 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003230 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003231
3232 list_del(p);
3233 kfree(b);
3234 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003235}
3236
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003237int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003238{
3239 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003240
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003241 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003242 return -EBADF;
3243
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003244 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003245 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003246
3247 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003248 if (!entry)
3249 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003250
3251 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003252 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003253
3254 list_add(&entry->list, &hdev->blacklist);
3255
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003256 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003257}
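
/*
 * Usage sketch (illustrative): blocking and later unblocking a BR/EDR
 * device, using the BDADDR_BREDR address type that mgmt passes in:
 *
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	...
 *	err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 */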
3258
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003259int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003260{
3261 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003262
Johan Hedberg35f74982014-02-18 17:14:32 +02003263 if (!bacmp(bdaddr, BDADDR_ANY)) {
3264 hci_blacklist_clear(hdev);
3265 return 0;
3266 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003267
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003268 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003269 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003270 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003271
3272 list_del(&entry->list);
3273 kfree(entry);
3274
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003275 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003276}
3277
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003278struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3279 bdaddr_t *bdaddr, u8 type)
3280{
3281 struct bdaddr_list *b;
3282
3283 list_for_each_entry(b, &hdev->le_white_list, list) {
3284 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3285 return b;
3286 }
3287
3288 return NULL;
3289}
3290
3291void hci_white_list_clear(struct hci_dev *hdev)
3292{
3293 struct list_head *p, *n;
3294
3295 list_for_each_safe(p, n, &hdev->le_white_list) {
3296 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3297
3298 list_del(p);
3299 kfree(b);
3300 }
3301}
3302
3303int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3304{
3305 struct bdaddr_list *entry;
3306
3307 if (!bacmp(bdaddr, BDADDR_ANY))
3308 return -EBADF;
3309
3310 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3311 if (!entry)
3312 return -ENOMEM;
3313
3314 bacpy(&entry->bdaddr, bdaddr);
3315 entry->bdaddr_type = type;
3316
3317 list_add(&entry->list, &hdev->le_white_list);
3318
3319 return 0;
3320}
3321
3322int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3323{
3324 struct bdaddr_list *entry;
3325
3326 if (!bacmp(bdaddr, BDADDR_ANY))
3327 return -EBADF;
3328
3329 entry = hci_white_list_lookup(hdev, bdaddr, type);
3330 if (!entry)
3331 return -ENOENT;
3332
3333 list_del(&entry->list);
3334 kfree(entry);
3335
3336 return 0;
3337}
3338
Andre Guedes15819a72014-02-03 13:56:18 -03003339/* This function requires the caller holds hdev->lock */
3340struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3341 bdaddr_t *addr, u8 addr_type)
3342{
3343 struct hci_conn_params *params;
3344
3345 list_for_each_entry(params, &hdev->le_conn_params, list) {
3346 if (bacmp(&params->addr, addr) == 0 &&
3347 params->addr_type == addr_type) {
3348 return params;
3349 }
3350 }
3351
3352 return NULL;
3353}
3354
Andre Guedescef952c2014-02-26 20:21:49 -03003355static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3356{
3357 struct hci_conn *conn;
3358
3359 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3360 if (!conn)
3361 return false;
3362
3363 if (conn->dst_type != type)
3364 return false;
3365
3366 if (conn->state != BT_CONNECTED)
3367 return false;
3368
3369 return true;
3370}
3371
Andre Guedesa9b0a042014-02-26 20:21:52 -03003372static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3373{
3374 if (addr_type == ADDR_LE_DEV_PUBLIC)
3375 return true;
3376
3377	/* Check for static random address type */
3378 if ((addr->b[5] & 0xc0) == 0xc0)
3379 return true;
3380
3381 return false;
3382}
3383
Andre Guedes15819a72014-02-03 13:56:18 -03003384/* This function requires the caller holds hdev->lock */
Andre Guedesa9b0a042014-02-26 20:21:52 -03003385int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3386 u8 auto_connect, u16 conn_min_interval,
3387 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003388{
3389 struct hci_conn_params *params;
3390
Andre Guedesa9b0a042014-02-26 20:21:52 -03003391 if (!is_identity_address(addr, addr_type))
3392 return -EINVAL;
3393
Andre Guedes15819a72014-02-03 13:56:18 -03003394 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003395 if (params)
3396 goto update;
Andre Guedes15819a72014-02-03 13:56:18 -03003397
3398 params = kzalloc(sizeof(*params), GFP_KERNEL);
3399 if (!params) {
3400 BT_ERR("Out of memory");
Andre Guedesa9b0a042014-02-26 20:21:52 -03003401 return -ENOMEM;
Andre Guedes15819a72014-02-03 13:56:18 -03003402 }
3403
3404 bacpy(&params->addr, addr);
3405 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003406
3407 list_add(&params->list, &hdev->le_conn_params);
3408
3409update:
Andre Guedes15819a72014-02-03 13:56:18 -03003410 params->conn_min_interval = conn_min_interval;
3411 params->conn_max_interval = conn_max_interval;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003412 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003413
Andre Guedescef952c2014-02-26 20:21:49 -03003414 switch (auto_connect) {
3415 case HCI_AUTO_CONN_DISABLED:
3416 case HCI_AUTO_CONN_LINK_LOSS:
3417 hci_pend_le_conn_del(hdev, addr, addr_type);
3418 break;
3419 case HCI_AUTO_CONN_ALWAYS:
3420 if (!is_connected(hdev, addr, addr_type))
3421 hci_pend_le_conn_add(hdev, addr, addr_type);
3422 break;
3423 }
Andre Guedes15819a72014-02-03 13:56:18 -03003424
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003425 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3426 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3427 conn_min_interval, conn_max_interval);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003428
3429 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003430}
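
/*
 * Usage sketch (illustrative; interval values are in 1.25 ms units and
 * hypothetical here). Asking the kernel to reconnect whenever the device
 * is seen advertising:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 *	hci_dev_unlock(hdev);
 */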
3431
3432/* This function requires the caller holds hdev->lock */
3433void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3434{
3435 struct hci_conn_params *params;
3436
3437 params = hci_conn_params_lookup(hdev, addr, addr_type);
3438 if (!params)
3439 return;
3440
Andre Guedescef952c2014-02-26 20:21:49 -03003441 hci_pend_le_conn_del(hdev, addr, addr_type);
3442
Andre Guedes15819a72014-02-03 13:56:18 -03003443 list_del(&params->list);
3444 kfree(params);
3445
3446 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3447}
3448
3449/* This function requires the caller holds hdev->lock */
3450void hci_conn_params_clear(struct hci_dev *hdev)
3451{
3452 struct hci_conn_params *params, *tmp;
3453
3454 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3455 list_del(&params->list);
3456 kfree(params);
3457 }
3458
3459 BT_DBG("All LE connection parameters were removed");
3460}
3461
Andre Guedes77a77a32014-02-26 20:21:46 -03003462/* This function requires the caller holds hdev->lock */
3463struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3464 bdaddr_t *addr, u8 addr_type)
3465{
3466 struct bdaddr_list *entry;
3467
3468 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3469 if (bacmp(&entry->bdaddr, addr) == 0 &&
3470 entry->bdaddr_type == addr_type)
3471 return entry;
3472 }
3473
3474 return NULL;
3475}
3476
3477/* This function requires the caller holds hdev->lock */
3478void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3479{
3480 struct bdaddr_list *entry;
3481
3482 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3483 if (entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003484 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003485
3486 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3487 if (!entry) {
3488 BT_ERR("Out of memory");
3489 return;
3490 }
3491
3492 bacpy(&entry->bdaddr, addr);
3493 entry->bdaddr_type = addr_type;
3494
3495 list_add(&entry->list, &hdev->pend_le_conns);
3496
3497 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003498
3499done:
3500 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003501}
3502
3503/* This function requires the caller holds hdev->lock */
3504void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3505{
3506 struct bdaddr_list *entry;
3507
3508 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3509 if (!entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003510 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003511
3512 list_del(&entry->list);
3513 kfree(entry);
3514
3515 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003516
3517done:
3518 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003519}
3520
3521/* This function requires the caller holds hdev->lock */
3522void hci_pend_le_conns_clear(struct hci_dev *hdev)
3523{
3524 struct bdaddr_list *entry, *tmp;
3525
3526 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3527 list_del(&entry->list);
3528 kfree(entry);
3529 }
3530
3531 BT_DBG("All LE pending connections cleared");
3532}
3533
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003534static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003535{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003536 if (status) {
3537 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003538
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003539 hci_dev_lock(hdev);
3540 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3541 hci_dev_unlock(hdev);
3542 return;
3543 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003544}
3545
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003546static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003547{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003548 /* General inquiry access code (GIAC) */
3549 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3550 struct hci_request req;
3551 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003552 int err;
3553
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003554 if (status) {
3555 BT_ERR("Failed to disable LE scanning: status %d", status);
3556 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003557 }
3558
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003559 switch (hdev->discovery.type) {
3560 case DISCOV_TYPE_LE:
3561 hci_dev_lock(hdev);
3562 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3563 hci_dev_unlock(hdev);
3564 break;
3565
3566 case DISCOV_TYPE_INTERLEAVED:
3567 hci_req_init(&req, hdev);
3568
3569 memset(&cp, 0, sizeof(cp));
3570 memcpy(&cp.lap, lap, sizeof(cp.lap));
3571 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3572 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3573
3574 hci_dev_lock(hdev);
3575
3576 hci_inquiry_cache_flush(hdev);
3577
3578 err = hci_req_run(&req, inquiry_complete);
3579 if (err) {
3580 BT_ERR("Inquiry request failed: err %d", err);
3581 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3582 }
3583
3584 hci_dev_unlock(hdev);
3585 break;
3586 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003587}
3588
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003589static void le_scan_disable_work(struct work_struct *work)
3590{
3591 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003592 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003593 struct hci_request req;
3594 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003595
3596 BT_DBG("%s", hdev->name);
3597
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003598 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003599
Andre Guedesb1efcc22014-02-26 20:21:40 -03003600 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003601
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003602 err = hci_req_run(&req, le_scan_disable_work_complete);
3603 if (err)
3604 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003605}
3606
Johan Hedberg8d972502014-02-28 12:54:14 +02003607static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3608{
3609 struct hci_dev *hdev = req->hdev;
3610
3611 /* If we're advertising or initiating an LE connection we can't
3612 * go ahead and change the random address at this time. This is
3613 * because the eventual initiator address used for the
3614 * subsequently created connection will be undefined (some
3615 * controllers use the new address and others the one we had
3616 * when the operation started).
3617 *
3618 * In this kind of scenario skip the update and let the random
3619 * address be updated at the next cycle.
3620 */
3621 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3622 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3623 BT_DBG("Deferring random address update");
3624 return;
3625 }
3626
3627 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3628}
3629
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003630int hci_update_random_address(struct hci_request *req, bool require_privacy,
3631 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003632{
3633 struct hci_dev *hdev = req->hdev;
3634 int err;
3635
3636	/* If privacy is enabled, use a resolvable private address. If the
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003637	 * current RPA has expired or something other than the current
3638	 * RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003639 */
3640 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003641 int to;
3642
3643 *own_addr_type = ADDR_LE_DEV_RANDOM;
3644
3645 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003646 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003647 return 0;
3648
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003649 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003650 if (err < 0) {
3651 BT_ERR("%s failed to generate new RPA", hdev->name);
3652 return err;
3653 }
3654
Johan Hedberg8d972502014-02-28 12:54:14 +02003655 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003656
3657 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3658 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3659
3660 return 0;
3661 }
3662
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003663	/* In case of required privacy without a resolvable private address,
3664 * use an unresolvable private address. This is useful for active
3665 * scanning and non-connectable advertising.
3666 */
3667 if (require_privacy) {
3668 bdaddr_t urpa;
3669
3670 get_random_bytes(&urpa, 6);
3671 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3672
3673 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003674 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003675 return 0;
3676 }
3677
Johan Hedbergebd3a742014-02-23 19:42:21 +02003678 /* If forcing static address is in use or there is no public
3679	 * address, use the static address as the random address (but skip
3680	 * the HCI command if the current random address is already the
3681	 * static one).
3682 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003683 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003684 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3685 *own_addr_type = ADDR_LE_DEV_RANDOM;
3686 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3687 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3688 &hdev->static_addr);
3689 return 0;
3690 }
3691
3692 /* Neither privacy nor static address is being used so use a
3693 * public address.
3694 */
3695 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3696
3697 return 0;
3698}
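
/*
 * Usage sketch (illustrative): picking the own-address type while
 * building, e.g., LE scan parameters as part of an hci_request:
 *
 *	u8 own_addr_type;
 *
 *	err = hci_update_random_address(req, true, &own_addr_type);
 *	if (err < 0)
 *		return err;
 *	... own_addr_type is now ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM ...
 */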
3699
Johan Hedberga1f4c312014-02-27 14:05:41 +02003700/* Copy the Identity Address of the controller.
3701 *
3702 * If the controller has a public BD_ADDR, then by default use that one.
3703 * If this is a LE only controller without a public address, default to
3704 * the static random address.
3705 *
3706 * For debugging purposes it is possible to force controllers with a
3707 * public address to use the static random address instead.
3708 */
3709void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3710 u8 *bdaddr_type)
3711{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003712 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003713 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3714 bacpy(bdaddr, &hdev->static_addr);
3715 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3716 } else {
3717 bacpy(bdaddr, &hdev->bdaddr);
3718 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3719 }
3720}
3721
David Herrmann9be0dab2012-04-22 14:39:57 +02003722/* Alloc HCI device */
3723struct hci_dev *hci_alloc_dev(void)
3724{
3725 struct hci_dev *hdev;
3726
3727 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3728 if (!hdev)
3729 return NULL;
3730
David Herrmannb1b813d2012-04-22 14:39:58 +02003731 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3732 hdev->esco_type = (ESCO_HV1);
3733 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003734 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3735 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003736 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3737 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003738
David Herrmannb1b813d2012-04-22 14:39:58 +02003739 hdev->sniff_max_interval = 800;
3740 hdev->sniff_min_interval = 80;
3741
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003742 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003743 hdev->le_scan_interval = 0x0060;
3744 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003745 hdev->le_conn_min_interval = 0x0028;
3746 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003747
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003748 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003749 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003750 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3751 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003752
David Herrmannb1b813d2012-04-22 14:39:58 +02003753 mutex_init(&hdev->lock);
3754 mutex_init(&hdev->req_lock);
3755
3756 INIT_LIST_HEAD(&hdev->mgmt_pending);
3757 INIT_LIST_HEAD(&hdev->blacklist);
3758 INIT_LIST_HEAD(&hdev->uuids);
3759 INIT_LIST_HEAD(&hdev->link_keys);
3760 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003761 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003762 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003763 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003764 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003765 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003766 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003767
3768 INIT_WORK(&hdev->rx_work, hci_rx_work);
3769 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3770 INIT_WORK(&hdev->tx_work, hci_tx_work);
3771 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003772
David Herrmannb1b813d2012-04-22 14:39:58 +02003773 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3774 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3775 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3776
David Herrmannb1b813d2012-04-22 14:39:58 +02003777 skb_queue_head_init(&hdev->rx_q);
3778 skb_queue_head_init(&hdev->cmd_q);
3779 skb_queue_head_init(&hdev->raw_q);
3780
3781 init_waitqueue_head(&hdev->req_wait_q);
3782
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003783 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003784
David Herrmannb1b813d2012-04-22 14:39:58 +02003785 hci_init_sysfs(hdev);
3786 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003787
3788 return hdev;
3789}
3790EXPORT_SYMBOL(hci_alloc_dev);
3791
3792/* Free HCI device */
3793void hci_free_dev(struct hci_dev *hdev)
3794{
David Herrmann9be0dab2012-04-22 14:39:57 +02003795 /* will free via device release */
3796 put_device(&hdev->dev);
3797}
3798EXPORT_SYMBOL(hci_free_dev);
3799
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800/* Register HCI device */
3801int hci_register_dev(struct hci_dev *hdev)
3802{
David Herrmannb1b813d2012-04-22 14:39:58 +02003803 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804
David Herrmann010666a2012-01-07 15:47:07 +01003805 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806 return -EINVAL;
3807
Mat Martineau08add512011-11-02 16:18:36 -07003808 /* Do not allow HCI_AMP devices to register at index 0,
3809 * so the index can be used as the AMP controller ID.
3810 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003811 switch (hdev->dev_type) {
3812 case HCI_BREDR:
3813 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3814 break;
3815 case HCI_AMP:
3816 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3817 break;
3818 default:
3819 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003821
Sasha Levin3df92b32012-05-27 22:36:56 +02003822 if (id < 0)
3823 return id;
3824
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 sprintf(hdev->name, "hci%d", id);
3826 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003827
3828 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3829
Kees Cookd8537542013-07-03 15:04:57 -07003830 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3831 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003832 if (!hdev->workqueue) {
3833 error = -ENOMEM;
3834 goto err;
3835 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003836
Kees Cookd8537542013-07-03 15:04:57 -07003837 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3838 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003839 if (!hdev->req_workqueue) {
3840 destroy_workqueue(hdev->workqueue);
3841 error = -ENOMEM;
3842 goto err;
3843 }
3844
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003845 if (!IS_ERR_OR_NULL(bt_debugfs))
3846 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3847
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003848 dev_set_name(&hdev->dev, "%s", hdev->name);
3849
Johan Hedberg99780a72014-02-18 10:40:07 +02003850 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3851 CRYPTO_ALG_ASYNC);
3852 if (IS_ERR(hdev->tfm_aes)) {
3853 BT_ERR("Unable to create crypto context");
3854 error = PTR_ERR(hdev->tfm_aes);
3855 hdev->tfm_aes = NULL;
3856 goto err_wqueue;
3857 }
3858
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003859 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003860 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003861 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003862
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003863 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003864 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3865 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003866 if (hdev->rfkill) {
3867 if (rfkill_register(hdev->rfkill) < 0) {
3868 rfkill_destroy(hdev->rfkill);
3869 hdev->rfkill = NULL;
3870 }
3871 }
3872
Johan Hedberg5e130362013-09-13 08:58:17 +03003873 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3874 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3875
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003876 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003877 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003878
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003879 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003880 /* Assume BR/EDR support until proven otherwise (such as
3881	 * through reading supported features during init).
3882 */
3883 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3884 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003885
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003886 write_lock(&hci_dev_list_lock);
3887 list_add(&hdev->list, &hci_dev_list);
3888 write_unlock(&hci_dev_list_lock);
3889
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003891 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892
Johan Hedberg19202572013-01-14 22:33:51 +02003893 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003894
Linus Torvalds1da177e2005-04-16 15:20:36 -07003895 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003896
Johan Hedberg99780a72014-02-18 10:40:07 +02003897err_tfm:
3898 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003899err_wqueue:
3900 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003901 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003902err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003903 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003904
David Herrmann33ca9542011-10-08 14:58:49 +02003905 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906}
3907EXPORT_SYMBOL(hci_register_dev);
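
/*
 * Registration sketch for a transport driver (illustrative; my_open,
 * my_close and my_send are hypothetical driver callbacks):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */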
3908
3909/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003910void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003911{
Sasha Levin3df92b32012-05-27 22:36:56 +02003912 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003913
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003914 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915
Johan Hovold94324962012-03-15 14:48:41 +01003916 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3917
Sasha Levin3df92b32012-05-27 22:36:56 +02003918 id = hdev->id;
3919
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003920 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003922 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923
3924 hci_dev_do_close(hdev);
3925
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303926 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003927 kfree_skb(hdev->reassembly[i]);
3928
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003929 cancel_work_sync(&hdev->power_on);
3930
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003931 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003932 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003933 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003934 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003935 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003936 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003937
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003938 /* mgmt_index_removed should take care of emptying the
3939 * pending list */
3940 BUG_ON(!list_empty(&hdev->mgmt_pending));
3941
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942 hci_notify(hdev, HCI_DEV_UNREG);
3943
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003944 if (hdev->rfkill) {
3945 rfkill_unregister(hdev->rfkill);
3946 rfkill_destroy(hdev->rfkill);
3947 }
3948
Johan Hedberg99780a72014-02-18 10:40:07 +02003949 if (hdev->tfm_aes)
3950 crypto_free_blkcipher(hdev->tfm_aes);
3951
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003952 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003953
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003954 debugfs_remove_recursive(hdev->debugfs);
3955
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003956 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003957 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003958
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003959 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003960 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003961 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003962 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003963 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003964 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003965 hci_remote_oob_data_clear(hdev);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003966 hci_white_list_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003967 hci_conn_params_clear(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003968 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003969 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003970
David Herrmanndc946bd2012-01-07 15:47:24 +01003971 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003972
3973 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974}
3975EXPORT_SYMBOL(hci_unregister_dev);
3976
3977/* Suspend HCI device */
3978int hci_suspend_dev(struct hci_dev *hdev)
3979{
3980 hci_notify(hdev, HCI_DEV_SUSPEND);
3981 return 0;
3982}
3983EXPORT_SYMBOL(hci_suspend_dev);
3984
3985/* Resume HCI device */
3986int hci_resume_dev(struct hci_dev *hdev)
3987{
3988 hci_notify(hdev, HCI_DEV_RESUME);
3989 return 0;
3990}
3991EXPORT_SYMBOL(hci_resume_dev);
3992
Marcel Holtmann76bca882009-11-18 00:40:39 +01003993/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003994int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003995{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003996 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003997 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003998 kfree_skb(skb);
3999 return -ENXIO;
4000 }
4001
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004002 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004003 bt_cb(skb)->incoming = 1;
4004
4005 /* Time stamp */
4006 __net_timestamp(skb);
4007
Marcel Holtmann76bca882009-11-18 00:40:39 +01004008 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004009 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004010
Marcel Holtmann76bca882009-11-18 00:40:39 +01004011 return 0;
4012}
4013EXPORT_SYMBOL(hci_recv_frame);
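
/*
 * Driver-side sketch (illustrative; "buf"/"len" are placeholders for a
 * complete packet received from the transport). The packet type must be
 * set before handing the skb to the core:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */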
4014
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304015static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004016 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304017{
4018 int len = 0;
4019 int hlen = 0;
4020 int remain = count;
4021 struct sk_buff *skb;
4022 struct bt_skb_cb *scb;
4023
4024 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004025 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304026 return -EILSEQ;
4027
4028 skb = hdev->reassembly[index];
4029
4030 if (!skb) {
4031 switch (type) {
4032 case HCI_ACLDATA_PKT:
4033 len = HCI_MAX_FRAME_SIZE;
4034 hlen = HCI_ACL_HDR_SIZE;
4035 break;
4036 case HCI_EVENT_PKT:
4037 len = HCI_MAX_EVENT_SIZE;
4038 hlen = HCI_EVENT_HDR_SIZE;
4039 break;
4040 case HCI_SCODATA_PKT:
4041 len = HCI_MAX_SCO_SIZE;
4042 hlen = HCI_SCO_HDR_SIZE;
4043 break;
4044 }
4045
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004046 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304047 if (!skb)
4048 return -ENOMEM;
4049
4050 scb = (void *) skb->cb;
4051 scb->expect = hlen;
4052 scb->pkt_type = type;
4053
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304054 hdev->reassembly[index] = skb;
4055 }
4056
4057 while (count) {
4058 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004059 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304060
4061 memcpy(skb_put(skb, len), data, len);
4062
4063 count -= len;
4064 data += len;
4065 scb->expect -= len;
4066 remain = count;
4067
4068 switch (type) {
4069 case HCI_EVENT_PKT:
4070 if (skb->len == HCI_EVENT_HDR_SIZE) {
4071 struct hci_event_hdr *h = hci_event_hdr(skb);
4072 scb->expect = h->plen;
4073
4074 if (skb_tailroom(skb) < scb->expect) {
4075 kfree_skb(skb);
4076 hdev->reassembly[index] = NULL;
4077 return -ENOMEM;
4078 }
4079 }
4080 break;
4081
4082 case HCI_ACLDATA_PKT:
4083 if (skb->len == HCI_ACL_HDR_SIZE) {
4084 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4085 scb->expect = __le16_to_cpu(h->dlen);
4086
4087 if (skb_tailroom(skb) < scb->expect) {
4088 kfree_skb(skb);
4089 hdev->reassembly[index] = NULL;
4090 return -ENOMEM;
4091 }
4092 }
4093 break;
4094
4095 case HCI_SCODATA_PKT:
4096 if (skb->len == HCI_SCO_HDR_SIZE) {
4097 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4098 scb->expect = h->dlen;
4099
4100 if (skb_tailroom(skb) < scb->expect) {
4101 kfree_skb(skb);
4102 hdev->reassembly[index] = NULL;
4103 return -ENOMEM;
4104 }
4105 }
4106 break;
4107 }
4108
4109 if (scb->expect == 0) {
4110 /* Complete frame */
4111
4112 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004113 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304114
4115 hdev->reassembly[index] = NULL;
4116 return remain;
4117 }
4118 }
4119
4120 return remain;
4121}
4122
Marcel Holtmannef222012007-07-11 06:42:04 +02004123int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4124{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304125 int rem = 0;
4126
Marcel Holtmannef222012007-07-11 06:42:04 +02004127 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4128 return -EILSEQ;
4129
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004130 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004131 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304132 if (rem < 0)
4133 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004134
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304135 data += (count - rem);
4136 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004137 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004138
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304139 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004140}
4141EXPORT_SYMBOL(hci_recv_fragment);
4142
Suraj Sumangala99811512010-07-14 13:02:19 +05304143#define STREAM_REASSEMBLY 0
4144
4145int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4146{
4147 int type;
4148 int rem = 0;
4149
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004150 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304151 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4152
4153 if (!skb) {
4154 struct { char type; } *pkt;
4155
4156 /* Start of the frame */
4157 pkt = data;
4158 type = pkt->type;
4159
4160 data++;
4161 count--;
4162 } else
4163 type = bt_cb(skb)->pkt_type;
4164
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004165 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004166 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304167 if (rem < 0)
4168 return rem;
4169
4170 data += (count - rem);
4171 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004172 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304173
4174 return rem;
4175}
4176EXPORT_SYMBOL(hci_recv_stream_fragment);
4177
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178/* ---- Interface to upper protocols ---- */
4179
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180int hci_register_cb(struct hci_cb *cb)
4181{
4182 BT_DBG("%p name %s", cb, cb->name);
4183
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004184 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004186 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187
4188 return 0;
4189}
4190EXPORT_SYMBOL(hci_register_cb);
4191
4192int hci_unregister_cb(struct hci_cb *cb)
4193{
4194 BT_DBG("%p name %s", cb, cb->name);
4195
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004196 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004198 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004199
4200 return 0;
4201}
4202EXPORT_SYMBOL(hci_unregister_cb);
4203
Marcel Holtmann51086992013-10-10 14:54:19 -07004204static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004206 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004208 /* Time stamp */
4209 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004211 /* Send copy to monitor */
4212 hci_send_to_monitor(hdev, skb);
4213
4214 if (atomic_read(&hdev->promisc)) {
4215 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004216 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217 }
4218
4219 /* Get rid of skb owner, prior to sending to the driver. */
4220 skb_orphan(skb);
4221
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004222 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004223 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224}
4225
Johan Hedberg3119ae92013-03-05 20:37:44 +02004226void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4227{
4228 skb_queue_head_init(&req->cmd_q);
4229 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004230 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004231}
4232
4233int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4234{
4235 struct hci_dev *hdev = req->hdev;
4236 struct sk_buff *skb;
4237 unsigned long flags;
4238
4239 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4240
Andre Guedes5d73e032013-03-08 11:20:16 -03004241	/* If an error occurred during request building, remove all HCI
4242 * commands queued on the HCI request queue.
4243 */
4244 if (req->err) {
4245 skb_queue_purge(&req->cmd_q);
4246 return req->err;
4247 }
4248
Johan Hedberg3119ae92013-03-05 20:37:44 +02004249 /* Do not allow empty requests */
4250 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004251 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004252
4253 skb = skb_peek_tail(&req->cmd_q);
4254 bt_cb(skb)->req.complete = complete;
4255
4256 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4257 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4258 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4259
4260 queue_work(hdev->workqueue, &hdev->cmd_work);
4261
4262 return 0;
4263}
4264
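/* Example (illustrative sketch only, not part of the driver API): build a
 * request carrying a single HCI_OP_RESET command and hand it to
 * hci_req_run() with a completion callback. The function names here are
 * invented for the example; the request API calls are the ones above.
 */
static void example_reset_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s reset request completed, status 0x%2.2x", hdev->name,
	       status);
}

static int example_run_reset_req(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	/* Returns 0 once the commands are spliced onto hdev->cmd_q,
	 * -ENODATA for an empty request, or req.err if building failed.
	 */
	return hci_req_run(&req, example_reset_complete);
}
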
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004265static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004266 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004267{
4268 int len = HCI_COMMAND_HDR_SIZE + plen;
4269 struct hci_command_hdr *hdr;
4270 struct sk_buff *skb;
4271
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004273 if (!skb)
4274 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275
4276 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004277 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004278 hdr->plen = plen;
4279
4280 if (plen)
4281 memcpy(skb_put(skb, plen), param, plen);
4282
4283 BT_DBG("skb len %d", skb->len);
4284
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004285 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004286
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004287 return skb;
4288}
4289
4290/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004291int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4292 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004293{
4294 struct sk_buff *skb;
4295
4296 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4297
4298 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4299 if (!skb) {
4300 BT_ERR("%s no memory for command", hdev->name);
4301 return -ENOMEM;
4302 }
4303
Johan Hedberg11714b32013-03-05 20:37:47 +02004304	/* Stand-alone HCI commands must be flagged as
4305 * single-command requests.
4306 */
4307 bt_cb(skb)->req.start = true;
4308
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004310 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311
4312 return 0;
4313}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314
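/* Example (illustrative sketch only): a stand-alone command sent outside
 * of any request. The opcode and parameter struct are the same ones the
 * LE scan helpers later in this file use.
 */
static int example_send_scan_disable(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;

	/* Queues the command on hdev->cmd_q and schedules cmd_work;
	 * -ENOMEM means the command skb could not be allocated.
	 */
	return hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp),
			    &cp);
}
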
Johan Hedberg71c76a12013-03-05 20:37:46 +02004315/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004316void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4317 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004318{
4319 struct hci_dev *hdev = req->hdev;
4320 struct sk_buff *skb;
4321
4322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4323
Andre Guedes34739c12013-03-08 11:20:18 -03004324	/* If an error occurred during request building, there is no point in
4325 * queueing the HCI command. We can simply return.
4326 */
4327 if (req->err)
4328 return;
4329
Johan Hedberg71c76a12013-03-05 20:37:46 +02004330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4331 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004332 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4333 hdev->name, opcode);
4334 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004335 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004336 }
4337
4338 if (skb_queue_empty(&req->cmd_q))
4339 bt_cb(skb)->req.start = true;
4340
Johan Hedberg02350a72013-04-03 21:50:29 +03004341 bt_cb(skb)->req.event = event;
4342
Johan Hedberg71c76a12013-03-05 20:37:46 +02004343 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004344}
4345
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004346void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4347 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004348{
4349 hci_req_add_ev(req, opcode, plen, param, 0);
4350}
4351
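/* Example (illustrative sketch only): queue a command that is not
 * terminated by Command Complete. Inquiry, for instance, is acknowledged
 * with Command Status, so the event argument tells the request machinery
 * which event finishes this command. The GIAC LAP and inquiry length are
 * example values, not taken from this file.
 */
static void example_add_inquiry(struct hci_request *req)
{
	struct hci_cp_inquiry cp;
	static const u8 giac[3] = { 0x33, 0x8b, 0x9e };

	memset(&cp, 0, sizeof(cp));
	memcpy(cp.lap, giac, sizeof(cp.lap));
	cp.length = 0x08;	/* 8 * 1.28s inquiry duration */

	hci_req_add_ev(req, HCI_OP_INQUIRY, sizeof(cp), &cp,
		       HCI_EV_CMD_STATUS);
}
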
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004353void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354{
4355 struct hci_command_hdr *hdr;
4356
4357 if (!hdev->sent_cmd)
4358 return NULL;
4359
4360 hdr = (void *) hdev->sent_cmd->data;
4361
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004362 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363 return NULL;
4364
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004365 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366
4367 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4368}
4369
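/* Example (illustrative sketch only) of the pattern the event handlers in
 * hci_event.c use: recover the parameters of the command that a Command
 * Complete event refers to. A NULL return means the last sent command
 * does not match the opcode.
 */
static void example_cc_le_set_scan_enable(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_set_scan_enable *cp;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	BT_DBG("%s enable %u filter_dup %u status 0x%2.2x", hdev->name,
	       cp->enable, cp->filter_dup, status);
}
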
4370/* Send ACL data */
4371static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4372{
4373 struct hci_acl_hdr *hdr;
4374 int len = skb->len;
4375
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004376 skb_push(skb, HCI_ACL_HDR_SIZE);
4377 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004378 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004379 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4380 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381}
4382
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004383static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004384 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004386 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387 struct hci_dev *hdev = conn->hdev;
4388 struct sk_buff *list;
4389
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004390 skb->len = skb_headlen(skb);
4391 skb->data_len = 0;
4392
4393 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004394
4395 switch (hdev->dev_type) {
4396 case HCI_BREDR:
4397 hci_add_acl_hdr(skb, conn->handle, flags);
4398 break;
4399 case HCI_AMP:
4400 hci_add_acl_hdr(skb, chan->handle, flags);
4401 break;
4402 default:
4403 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4404 return;
4405 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004406
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004407 list = skb_shinfo(skb)->frag_list;
4408 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409		/* Non-fragmented */
4410 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4411
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004412 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413 } else {
4414 /* Fragmented */
4415 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4416
4417 skb_shinfo(skb)->frag_list = NULL;
4418
4419 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004420 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004422 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004423
4424 flags &= ~ACL_START;
4425 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426 do {
4427 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004428
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004429 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004430 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431
4432 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4433
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004434 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435 } while (list);
4436
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004437 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004439}
4440
4441void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4442{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004443 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004444
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004445 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004446
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004447 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004448
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004449 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451
4452/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004453void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454{
4455 struct hci_dev *hdev = conn->hdev;
4456 struct hci_sco_hdr hdr;
4457
4458 BT_DBG("%s len %d", hdev->name, skb->len);
4459
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004460 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461 hdr.dlen = skb->len;
4462
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004463 skb_push(skb, HCI_SCO_HDR_SIZE);
4464 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004465 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004467 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004468
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004470 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472
4473/* ---- HCI TX task (outgoing data) ---- */
4474
4475/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004476static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4477 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478{
4479 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004480 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004481 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004483	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004485
4486 rcu_read_lock();
4487
4488 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004489 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004491
4492 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4493 continue;
4494
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495 num++;
4496
4497 if (c->sent < min) {
4498 min = c->sent;
4499 conn = c;
4500 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004501
4502 if (hci_conn_num(hdev, type) == num)
4503 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504 }
4505
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004506 rcu_read_unlock();
4507
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004509 int cnt, q;
4510
4511 switch (conn->type) {
4512 case ACL_LINK:
4513 cnt = hdev->acl_cnt;
4514 break;
4515 case SCO_LINK:
4516 case ESCO_LINK:
4517 cnt = hdev->sco_cnt;
4518 break;
4519 case LE_LINK:
4520 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4521 break;
4522 default:
4523 cnt = 0;
4524 BT_ERR("Unknown link type");
4525 }
4526
4527 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528 *quote = q ? q : 1;
4529 } else
4530 *quote = 0;
4531
4532 BT_DBG("conn %p quote %d", conn, *quote);
4533 return conn;
4534}
4535
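/* Worked example of the quota above (illustrative numbers): with
 * hdev->acl_cnt == 9 free ACL buffers and num == 4 connections that have
 * queued data, the least recently served connection gets a quote of
 * 9 / 4 == 2 packets for this scheduling pass. When cnt / num rounds
 * down to zero, the fallback quote of 1 still guarantees forward
 * progress.
 */
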
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004536static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004537{
4538 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004539 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004540
Ville Tervobae1f5d92011-02-10 22:38:53 -03004541 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004543 rcu_read_lock();
4544
Linus Torvalds1da177e2005-04-16 15:20:36 -07004545 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004546 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004547 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004548 BT_ERR("%s killing stalled connection %pMR",
4549 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004550 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004551 }
4552 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004553
4554 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555}
4556
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004557static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4558 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004559{
4560 struct hci_conn_hash *h = &hdev->conn_hash;
4561 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004562 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004563 struct hci_conn *conn;
4564 int cnt, q, conn_num = 0;
4565
4566 BT_DBG("%s", hdev->name);
4567
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004568 rcu_read_lock();
4569
4570 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004571 struct hci_chan *tmp;
4572
4573 if (conn->type != type)
4574 continue;
4575
4576 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4577 continue;
4578
4579 conn_num++;
4580
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004581 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004582 struct sk_buff *skb;
4583
4584 if (skb_queue_empty(&tmp->data_q))
4585 continue;
4586
4587 skb = skb_peek(&tmp->data_q);
4588 if (skb->priority < cur_prio)
4589 continue;
4590
4591 if (skb->priority > cur_prio) {
4592 num = 0;
4593 min = ~0;
4594 cur_prio = skb->priority;
4595 }
4596
4597 num++;
4598
4599 if (conn->sent < min) {
4600 min = conn->sent;
4601 chan = tmp;
4602 }
4603 }
4604
4605 if (hci_conn_num(hdev, type) == conn_num)
4606 break;
4607 }
4608
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004609 rcu_read_unlock();
4610
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004611 if (!chan)
4612 return NULL;
4613
4614 switch (chan->conn->type) {
4615 case ACL_LINK:
4616 cnt = hdev->acl_cnt;
4617 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004618 case AMP_LINK:
4619 cnt = hdev->block_cnt;
4620 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004621 case SCO_LINK:
4622 case ESCO_LINK:
4623 cnt = hdev->sco_cnt;
4624 break;
4625 case LE_LINK:
4626 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4627 break;
4628 default:
4629 cnt = 0;
4630 BT_ERR("Unknown link type");
4631 }
4632
4633 q = cnt / num;
4634 *quote = q ? q : 1;
4635 BT_DBG("chan %p quote %d", chan, *quote);
4636 return chan;
4637}
4638
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004639static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4640{
4641 struct hci_conn_hash *h = &hdev->conn_hash;
4642 struct hci_conn *conn;
4643 int num = 0;
4644
4645 BT_DBG("%s", hdev->name);
4646
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004647 rcu_read_lock();
4648
4649 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004650 struct hci_chan *chan;
4651
4652 if (conn->type != type)
4653 continue;
4654
4655 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4656 continue;
4657
4658 num++;
4659
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004660 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004661 struct sk_buff *skb;
4662
4663 if (chan->sent) {
4664 chan->sent = 0;
4665 continue;
4666 }
4667
4668 if (skb_queue_empty(&chan->data_q))
4669 continue;
4670
4671 skb = skb_peek(&chan->data_q);
4672 if (skb->priority >= HCI_PRIO_MAX - 1)
4673 continue;
4674
4675 skb->priority = HCI_PRIO_MAX - 1;
4676
4677 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004678 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004679 }
4680
4681 if (hci_conn_num(hdev, type) == num)
4682 break;
4683 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004684
4685 rcu_read_unlock();
4686
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004687}
4688
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004689static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4690{
4691 /* Calculate count of blocks used by this packet */
4692 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4693}
4694
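/* Worked example (illustrative numbers): with hdev->block_len == 64 and a
 * 260 byte skb that includes the 4 byte ACL header, the packet occupies
 * DIV_ROUND_UP(260 - 4, 64) == 4 of the controller's buffer blocks.
 */
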
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004695static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697 if (!test_bit(HCI_RAW, &hdev->flags)) {
4698 /* ACL tx timeout must be longer than maximum
4699 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004700 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004701 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004702 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004704}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004705
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004706static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004707{
4708 unsigned int cnt = hdev->acl_cnt;
4709 struct hci_chan *chan;
4710 struct sk_buff *skb;
4711 int quote;
4712
4713 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004714
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004715 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004716 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004717 u32 priority = (skb_peek(&chan->data_q))->priority;
4718 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004719 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004720 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004721
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004722 /* Stop if priority has changed */
4723 if (skb->priority < priority)
4724 break;
4725
4726 skb = skb_dequeue(&chan->data_q);
4727
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004728 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004729 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004730
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004731 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004732 hdev->acl_last_tx = jiffies;
4733
4734 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004735 chan->sent++;
4736 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737 }
4738 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004739
4740 if (cnt != hdev->acl_cnt)
4741 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742}
4743
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004744static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004745{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004746 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004747 struct hci_chan *chan;
4748 struct sk_buff *skb;
4749 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004750 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004751
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004752 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004753
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004754 BT_DBG("%s", hdev->name);
4755
4756 if (hdev->dev_type == HCI_AMP)
4757 type = AMP_LINK;
4758 else
4759 type = ACL_LINK;
4760
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004761 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004762 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004763 u32 priority = (skb_peek(&chan->data_q))->priority;
4764 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4765 int blocks;
4766
4767 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004768 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004769
4770 /* Stop if priority has changed */
4771 if (skb->priority < priority)
4772 break;
4773
4774 skb = skb_dequeue(&chan->data_q);
4775
4776 blocks = __get_blocks(hdev, skb);
4777 if (blocks > hdev->block_cnt)
4778 return;
4779
4780 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004781 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004782
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004783 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004784 hdev->acl_last_tx = jiffies;
4785
4786 hdev->block_cnt -= blocks;
4787 quote -= blocks;
4788
4789 chan->sent += blocks;
4790 chan->conn->sent += blocks;
4791 }
4792 }
4793
4794 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004795 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004796}
4797
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004798static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004799{
4800 BT_DBG("%s", hdev->name);
4801
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004802 /* No ACL link over BR/EDR controller */
4803 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4804 return;
4805
4806 /* No AMP link over AMP controller */
4807 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004808 return;
4809
4810 switch (hdev->flow_ctl_mode) {
4811 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4812 hci_sched_acl_pkt(hdev);
4813 break;
4814
4815 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4816 hci_sched_acl_blk(hdev);
4817 break;
4818 }
4819}
4820
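/* Note on the two modes dispatched above: packet-based flow control
 * charges one hdev->acl_cnt credit per ACL packet, while block-based
 * flow control (AMP controllers) charges hdev->block_cnt per buffer
 * block as computed by __get_blocks().
 */
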
Linus Torvalds1da177e2005-04-16 15:20:36 -07004821/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004822static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004823{
4824 struct hci_conn *conn;
4825 struct sk_buff *skb;
4826 int quote;
4827
4828 BT_DBG("%s", hdev->name);
4829
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004830 if (!hci_conn_num(hdev, SCO_LINK))
4831 return;
4832
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4834 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4835 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004836 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004837
4838 conn->sent++;
4839 if (conn->sent == ~0)
4840 conn->sent = 0;
4841 }
4842 }
4843}
4844
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004845static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004846{
4847 struct hci_conn *conn;
4848 struct sk_buff *skb;
4849 int quote;
4850
4851 BT_DBG("%s", hdev->name);
4852
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004853 if (!hci_conn_num(hdev, ESCO_LINK))
4854 return;
4855
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004856 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4857 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004858 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4859 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004860 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004861
4862 conn->sent++;
4863 if (conn->sent == ~0)
4864 conn->sent = 0;
4865 }
4866 }
4867}
4868
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004869static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004870{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004871 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004872 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004873 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004874
4875 BT_DBG("%s", hdev->name);
4876
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004877 if (!hci_conn_num(hdev, LE_LINK))
4878 return;
4879
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004880 if (!test_bit(HCI_RAW, &hdev->flags)) {
4881 /* LE tx timeout must be longer than maximum
4882 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004883 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004884 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004885 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004886 }
4887
4888 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004889 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004890 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004891 u32 priority = (skb_peek(&chan->data_q))->priority;
4892 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004893 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004894 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004895
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004896 /* Stop if priority has changed */
4897 if (skb->priority < priority)
4898 break;
4899
4900 skb = skb_dequeue(&chan->data_q);
4901
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004902 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004903 hdev->le_last_tx = jiffies;
4904
4905 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004906 chan->sent++;
4907 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004908 }
4909 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004910
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004911 if (hdev->le_pkts)
4912 hdev->le_cnt = cnt;
4913 else
4914 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004915
4916 if (cnt != tmp)
4917 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004918}
4919
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004920static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004922 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004923 struct sk_buff *skb;
4924
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004925 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004926 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927
Marcel Holtmann52de5992013-09-03 18:08:38 -07004928 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4929 /* Schedule queues and send stuff to HCI driver */
4930 hci_sched_acl(hdev);
4931 hci_sched_sco(hdev);
4932 hci_sched_esco(hdev);
4933 hci_sched_le(hdev);
4934 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004935
Linus Torvalds1da177e2005-04-16 15:20:36 -07004936 /* Send next queued raw (unknown type) packet */
4937 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004938 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004939}
4940
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004941/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004942
4943/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004944static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945{
4946 struct hci_acl_hdr *hdr = (void *) skb->data;
4947 struct hci_conn *conn;
4948 __u16 handle, flags;
4949
4950 skb_pull(skb, HCI_ACL_HDR_SIZE);
4951
4952 handle = __le16_to_cpu(hdr->handle);
4953 flags = hci_flags(handle);
4954 handle = hci_handle(handle);
4955
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004956 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004957 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004958
4959 hdev->stat.acl_rx++;
4960
4961 hci_dev_lock(hdev);
4962 conn = hci_conn_hash_lookup_handle(hdev, handle);
4963 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004964
Linus Torvalds1da177e2005-04-16 15:20:36 -07004965 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004966 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004967
Linus Torvalds1da177e2005-04-16 15:20:36 -07004968 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004969 l2cap_recv_acldata(conn, skb, flags);
4970 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004971 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004972 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004973 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004974 }
4975
4976 kfree_skb(skb);
4977}
4978
4979/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004980static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004981{
4982 struct hci_sco_hdr *hdr = (void *) skb->data;
4983 struct hci_conn *conn;
4984 __u16 handle;
4985
4986 skb_pull(skb, HCI_SCO_HDR_SIZE);
4987
4988 handle = __le16_to_cpu(hdr->handle);
4989
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004990 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991
4992 hdev->stat.sco_rx++;
4993
4994 hci_dev_lock(hdev);
4995 conn = hci_conn_hash_lookup_handle(hdev, handle);
4996 hci_dev_unlock(hdev);
4997
4998 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005000 sco_recv_scodata(conn, skb);
5001 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005002 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005003 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005004 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005005 }
5006
5007 kfree_skb(skb);
5008}
5009
Johan Hedberg9238f362013-03-05 20:37:48 +02005010static bool hci_req_is_complete(struct hci_dev *hdev)
5011{
5012 struct sk_buff *skb;
5013
5014 skb = skb_peek(&hdev->cmd_q);
5015 if (!skb)
5016 return true;
5017
5018 return bt_cb(skb)->req.start;
5019}
5020
Johan Hedberg42c6b122013-03-05 20:37:49 +02005021static void hci_resend_last(struct hci_dev *hdev)
5022{
5023 struct hci_command_hdr *sent;
5024 struct sk_buff *skb;
5025 u16 opcode;
5026
5027 if (!hdev->sent_cmd)
5028 return;
5029
5030 sent = (void *) hdev->sent_cmd->data;
5031 opcode = __le16_to_cpu(sent->opcode);
5032 if (opcode == HCI_OP_RESET)
5033 return;
5034
5035 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5036 if (!skb)
5037 return;
5038
5039 skb_queue_head(&hdev->cmd_q, skb);
5040 queue_work(hdev->workqueue, &hdev->cmd_work);
5041}
5042
Johan Hedberg9238f362013-03-05 20:37:48 +02005043void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5044{
5045 hci_req_complete_t req_complete = NULL;
5046 struct sk_buff *skb;
5047 unsigned long flags;
5048
5049 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5050
Johan Hedberg42c6b122013-03-05 20:37:49 +02005051 /* If the completed command doesn't match the last one that was
5052	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005053 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005054 if (!hci_sent_cmd_data(hdev, opcode)) {
5055 /* Some CSR based controllers generate a spontaneous
5056 * reset complete event during init and any pending
5057 * command will never be completed. In such a case we
5058 * need to resend whatever was the last sent
5059 * command.
5060 */
5061 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5062 hci_resend_last(hdev);
5063
Johan Hedberg9238f362013-03-05 20:37:48 +02005064 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005065 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005066
5067	/* If the command succeeded and there are still more commands in
5068 * this request the request is not yet complete.
5069 */
5070 if (!status && !hci_req_is_complete(hdev))
5071 return;
5072
5073	/* If this was the last command in a request, the complete
5074 * callback would be found in hdev->sent_cmd instead of the
5075 * command queue (hdev->cmd_q).
5076 */
5077 if (hdev->sent_cmd) {
5078 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005079
5080 if (req_complete) {
5081 /* We must set the complete callback to NULL to
5082 * avoid calling the callback more than once if
5083 * this function gets called again.
5084 */
5085 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5086
Johan Hedberg9238f362013-03-05 20:37:48 +02005087 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005088 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005089 }
5090
5091 /* Remove all pending commands belonging to this request */
5092 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5093 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5094 if (bt_cb(skb)->req.start) {
5095 __skb_queue_head(&hdev->cmd_q, skb);
5096 break;
5097 }
5098
5099 req_complete = bt_cb(skb)->req.complete;
5100 kfree_skb(skb);
5101 }
5102 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5103
5104call_complete:
5105 if (req_complete)
5106 req_complete(hdev, status);
5107}
5108
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005109static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005111 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112 struct sk_buff *skb;
5113
5114 BT_DBG("%s", hdev->name);
5115
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005117 /* Send copy to monitor */
5118 hci_send_to_monitor(hdev, skb);
5119
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120 if (atomic_read(&hdev->promisc)) {
5121 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005122 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005123 }
5124
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07005125 if (test_bit(HCI_RAW, &hdev->flags) ||
5126 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127 kfree_skb(skb);
5128 continue;
5129 }
5130
5131 if (test_bit(HCI_INIT, &hdev->flags)) {
5132			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005133 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005134 case HCI_ACLDATA_PKT:
5135 case HCI_SCODATA_PKT:
5136 kfree_skb(skb);
5137 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005138 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139 }
5140
5141 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005142 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005143 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005144 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145 hci_event_packet(hdev, skb);
5146 break;
5147
5148 case HCI_ACLDATA_PKT:
5149 BT_DBG("%s ACL data packet", hdev->name);
5150 hci_acldata_packet(hdev, skb);
5151 break;
5152
5153 case HCI_SCODATA_PKT:
5154 BT_DBG("%s SCO data packet", hdev->name);
5155 hci_scodata_packet(hdev, skb);
5156 break;
5157
5158 default:
5159 kfree_skb(skb);
5160 break;
5161 }
5162 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163}
5164
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005165static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005166{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005167 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168 struct sk_buff *skb;
5169
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005170 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5171 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005174 if (atomic_read(&hdev->cmd_cnt)) {
5175 skb = skb_dequeue(&hdev->cmd_q);
5176 if (!skb)
5177 return;
5178
Wei Yongjun7585b972009-02-25 18:29:52 +08005179 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005181 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005182 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005184 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005185 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005186 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005187 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005188 schedule_delayed_work(&hdev->cmd_timer,
5189 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190 } else {
5191 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005192 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193 }
5194 }
5195}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005196
5197void hci_req_add_le_scan_disable(struct hci_request *req)
5198{
5199 struct hci_cp_le_set_scan_enable cp;
5200
5201 memset(&cp, 0, sizeof(cp));
5202 cp.enable = LE_SCAN_DISABLE;
5203 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5204}
Andre Guedesa4790db2014-02-26 20:21:47 -03005205
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005206void hci_req_add_le_passive_scan(struct hci_request *req)
5207{
5208 struct hci_cp_le_set_scan_param param_cp;
5209 struct hci_cp_le_set_scan_enable enable_cp;
5210 struct hci_dev *hdev = req->hdev;
5211 u8 own_addr_type;
5212
5213 /* Set require_privacy to true to avoid identification from
5214 * unknown peer devices. Since this is passive scanning, no
5215 * SCAN_REQ using the local identity should be sent. Mandating
5216 * privacy is just an extra precaution.
5217 */
5218 if (hci_update_random_address(req, true, &own_addr_type))
5219 return;
5220
5221 memset(&param_cp, 0, sizeof(param_cp));
5222 param_cp.type = LE_SCAN_PASSIVE;
5223 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5224 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5225 param_cp.own_address_type = own_addr_type;
5226 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5227 &param_cp);
5228
5229 memset(&enable_cp, 0, sizeof(enable_cp));
5230 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005231 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005232 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5233 &enable_cp);
5234}
5235
Andre Guedesa4790db2014-02-26 20:21:47 -03005236static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5237{
5238 if (status)
5239		BT_DBG("HCI request failed to update background scanning: status 0x%2.2x", status);
5241}
5242
5243/* This function controls the background scanning based on hdev->pend_le_conns
5244 * list. If there are pending LE connections, we start the background scanning,
5245 * otherwise we stop it.
5246 *
5247 * This function requires that the caller hold hdev->lock.
5248 */
5249void hci_update_background_scan(struct hci_dev *hdev)
5250{
Andre Guedesa4790db2014-02-26 20:21:47 -03005251 struct hci_request req;
5252 struct hci_conn *conn;
5253 int err;
5254
5255 hci_req_init(&req, hdev);
5256
5257 if (list_empty(&hdev->pend_le_conns)) {
5258		/* If there are no pending LE connections, we should stop
5259 * the background scanning.
5260 */
5261
5262 /* If controller is not scanning we are done. */
5263 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5264 return;
5265
5266 hci_req_add_le_scan_disable(&req);
5267
5268 BT_DBG("%s stopping background scanning", hdev->name);
5269 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005270 /* If there is at least one pending LE connection, we should
5271 * keep the background scan running.
5272 */
5273
Andre Guedesa4790db2014-02-26 20:21:47 -03005274 /* If controller is connecting, we should not start scanning
5275 * since some controllers are not able to scan and connect at
5276 * the same time.
5277 */
5278 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5279 if (conn)
5280 return;
5281
Andre Guedes4340a122014-03-10 18:26:24 -03005282 /* If controller is currently scanning, we stop it to ensure we
5283 * don't miss any advertising (due to duplicates filter).
5284 */
5285 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5286 hci_req_add_le_scan_disable(&req);
5287
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005288 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005289
5290 BT_DBG("%s starting background scanning", hdev->name);
5291 }
5292
5293 err = hci_req_run(&req, update_background_scan_complete);
5294 if (err)
5295 BT_ERR("Failed to run HCI request: err %d", err);
5296}
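
/* Usage note (sketch): hci_update_background_scan() walks
 * hdev->pend_le_conns, so callers are expected to hold hdev->lock:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */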