blob: 2cfaaa6acb04c45b2a43eb93ae18948a0f870e36 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020041#include "smp.h"
42
/* Work handlers for the RX, command and TX queues; defined below. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* States for synchronous HCI request completion. */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize HCI request processing on a device. */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
66
Linus Torvalds1da177e2005-04-16 15:20:36 -070067/* ---- HCI notifications ---- */
68
/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
73
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070074/* ---- HCI debugfs entries ---- */
75
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070076static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
77 size_t count, loff_t *ppos)
78{
79 struct hci_dev *hdev = file->private_data;
80 char buf[3];
81
Marcel Holtmann111902f2014-06-21 04:53:17 +020082 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070083 buf[1] = '\n';
84 buf[2] = '\0';
85 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
86}
87
88static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
89 size_t count, loff_t *ppos)
90{
91 struct hci_dev *hdev = file->private_data;
92 struct sk_buff *skb;
93 char buf[32];
94 size_t buf_size = min(count, (sizeof(buf)-1));
95 bool enable;
96 int err;
97
98 if (!test_bit(HCI_UP, &hdev->flags))
99 return -ENETDOWN;
100
101 if (copy_from_user(buf, user_buf, buf_size))
102 return -EFAULT;
103
104 buf[buf_size] = '\0';
105 if (strtobool(buf, &enable))
106 return -EINVAL;
107
Marcel Holtmann111902f2014-06-21 04:53:17 +0200108 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700109 return -EALREADY;
110
111 hci_req_lock(hdev);
112 if (enable)
113 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
114 HCI_CMD_TIMEOUT);
115 else
116 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 HCI_CMD_TIMEOUT);
118 hci_req_unlock(hdev);
119
120 if (IS_ERR(skb))
121 return PTR_ERR(skb);
122
123 err = -bt_to_errno(skb->data[0]);
124 kfree_skb(skb);
125
126 if (err < 0)
127 return err;
128
Marcel Holtmann111902f2014-06-21 04:53:17 +0200129 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700130
131 return count;
132}
133
134static const struct file_operations dut_mode_fops = {
135 .open = simple_open,
136 .read = dut_mode_read,
137 .write = dut_mode_write,
138 .llseek = default_llseek,
139};
140
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700141static int features_show(struct seq_file *f, void *ptr)
142{
143 struct hci_dev *hdev = f->private;
144 u8 p;
145
146 hci_dev_lock(hdev);
147 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700148 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700149 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
150 hdev->features[p][0], hdev->features[p][1],
151 hdev->features[p][2], hdev->features[p][3],
152 hdev->features[p][4], hdev->features[p][5],
153 hdev->features[p][6], hdev->features[p][7]);
154 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700155 if (lmp_le_capable(hdev))
156 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
157 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
158 hdev->le_features[0], hdev->le_features[1],
159 hdev->le_features[2], hdev->le_features[3],
160 hdev->le_features[4], hdev->le_features[5],
161 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700162 hci_dev_unlock(hdev);
163
164 return 0;
165}
166
167static int features_open(struct inode *inode, struct file *file)
168{
169 return single_open(file, features_show, inode->i_private);
170}
171
172static const struct file_operations features_fops = {
173 .open = features_open,
174 .read = seq_read,
175 .llseek = seq_lseek,
176 .release = single_release,
177};
178
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700179static int blacklist_show(struct seq_file *f, void *p)
180{
181 struct hci_dev *hdev = f->private;
182 struct bdaddr_list *b;
183
184 hci_dev_lock(hdev);
185 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700186 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700187 hci_dev_unlock(hdev);
188
189 return 0;
190}
191
192static int blacklist_open(struct inode *inode, struct file *file)
193{
194 return single_open(file, blacklist_show, inode->i_private);
195}
196
197static const struct file_operations blacklist_fops = {
198 .open = blacklist_open,
199 .read = seq_read,
200 .llseek = seq_lseek,
201 .release = single_release,
202};
203
Marcel Holtmann47219832013-10-17 17:24:15 -0700204static int uuids_show(struct seq_file *f, void *p)
205{
206 struct hci_dev *hdev = f->private;
207 struct bt_uuid *uuid;
208
209 hci_dev_lock(hdev);
210 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700211 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700212
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700213 /* The Bluetooth UUID values are stored in big endian,
214 * but with reversed byte order. So convert them into
215 * the right order for the %pUb modifier.
216 */
217 for (i = 0; i < 16; i++)
218 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700219
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700220 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700221 }
222 hci_dev_unlock(hdev);
223
224 return 0;
225}
226
227static int uuids_open(struct inode *inode, struct file *file)
228{
229 return single_open(file, uuids_show, inode->i_private);
230}
231
232static const struct file_operations uuids_fops = {
233 .open = uuids_open,
234 .read = seq_read,
235 .llseek = seq_lseek,
236 .release = single_release,
237};
238
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700239static int inquiry_cache_show(struct seq_file *f, void *p)
240{
241 struct hci_dev *hdev = f->private;
242 struct discovery_state *cache = &hdev->discovery;
243 struct inquiry_entry *e;
244
245 hci_dev_lock(hdev);
246
247 list_for_each_entry(e, &cache->all, all) {
248 struct inquiry_data *data = &e->data;
249 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
250 &data->bdaddr,
251 data->pscan_rep_mode, data->pscan_period_mode,
252 data->pscan_mode, data->dev_class[2],
253 data->dev_class[1], data->dev_class[0],
254 __le16_to_cpu(data->clock_offset),
255 data->rssi, data->ssp_mode, e->timestamp);
256 }
257
258 hci_dev_unlock(hdev);
259
260 return 0;
261}
262
263static int inquiry_cache_open(struct inode *inode, struct file *file)
264{
265 return single_open(file, inquiry_cache_show, inode->i_private);
266}
267
268static const struct file_operations inquiry_cache_fops = {
269 .open = inquiry_cache_open,
270 .read = seq_read,
271 .llseek = seq_lseek,
272 .release = single_release,
273};
274
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700275static int link_keys_show(struct seq_file *f, void *ptr)
276{
277 struct hci_dev *hdev = f->private;
Johan Hedberg0378b592014-11-19 15:22:22 +0200278 struct link_key *key;
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700279
Johan Hedberg0378b592014-11-19 15:22:22 +0200280 rcu_read_lock();
281 list_for_each_entry_rcu(key, &hdev->link_keys, list)
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700282 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
283 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
Johan Hedberg0378b592014-11-19 15:22:22 +0200284 rcu_read_unlock();
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700285
286 return 0;
287}
288
289static int link_keys_open(struct inode *inode, struct file *file)
290{
291 return single_open(file, link_keys_show, inode->i_private);
292}
293
294static const struct file_operations link_keys_fops = {
295 .open = link_keys_open,
296 .read = seq_read,
297 .llseek = seq_lseek,
298 .release = single_release,
299};
300
/* debugfs "dev_class" — print the 3-byte class of device, MSB first. */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
324
/* debugfs "voice_setting" — read-only accessor, formatted as 0xNNNN. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* No setter: the attribute is read-only. */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
338
/* debugfs "auto_accept_delay" — set hdev->auto_accept_delay (no range
 * restriction) under the device lock.
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "auto_accept_delay" — read hdev->auto_accept_delay. */
static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
363
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800364static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
365 size_t count, loff_t *ppos)
366{
367 struct hci_dev *hdev = file->private_data;
368 char buf[3];
369
Marcel Holtmann111902f2014-06-21 04:53:17 +0200370 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800371 buf[1] = '\n';
372 buf[2] = '\0';
373 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
374}
375
376static ssize_t force_sc_support_write(struct file *file,
377 const char __user *user_buf,
378 size_t count, loff_t *ppos)
379{
380 struct hci_dev *hdev = file->private_data;
381 char buf[32];
382 size_t buf_size = min(count, (sizeof(buf)-1));
383 bool enable;
384
385 if (test_bit(HCI_UP, &hdev->flags))
386 return -EBUSY;
387
388 if (copy_from_user(buf, user_buf, buf_size))
389 return -EFAULT;
390
391 buf[buf_size] = '\0';
392 if (strtobool(buf, &enable))
393 return -EINVAL;
394
Marcel Holtmann111902f2014-06-21 04:53:17 +0200395 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800396 return -EALREADY;
397
Marcel Holtmann111902f2014-06-21 04:53:17 +0200398 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800399
400 return count;
401}
402
403static const struct file_operations force_sc_support_fops = {
404 .open = simple_open,
405 .read = force_sc_support_read,
406 .write = force_sc_support_write,
407 .llseek = default_llseek,
408};
409
Johan Hedberg858cdc72014-10-16 10:45:31 +0200410static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
411 size_t count, loff_t *ppos)
412{
413 struct hci_dev *hdev = file->private_data;
414 char buf[3];
415
416 buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
417 buf[1] = '\n';
418 buf[2] = '\0';
419 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
420}
421
422static ssize_t force_lesc_support_write(struct file *file,
423 const char __user *user_buf,
424 size_t count, loff_t *ppos)
425{
426 struct hci_dev *hdev = file->private_data;
427 char buf[32];
428 size_t buf_size = min(count, (sizeof(buf)-1));
429 bool enable;
430
431 if (copy_from_user(buf, user_buf, buf_size))
432 return -EFAULT;
433
434 buf[buf_size] = '\0';
435 if (strtobool(buf, &enable))
436 return -EINVAL;
437
438 if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
439 return -EALREADY;
440
441 change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
442
443 return count;
444}
445
446static const struct file_operations force_lesc_support_fops = {
447 .open = simple_open,
448 .read = force_lesc_support_read,
449 .write = force_lesc_support_write,
450 .llseek = default_llseek,
451};
452
/* debugfs "sc_only_mode" — read-only view of the HCI_SC_ONLY device flag
 * (note: dev_flags here, not dbg_flags like the force_* attributes).
 */
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
470
/* debugfs "idle_timeout" — set the idle timeout. Zero disables it;
 * otherwise the value must lie in [500, 3600000] (milliseconds per the
 * surrounding sniff-mode attributes — TODO confirm unit against callers).
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "idle_timeout" — read hdev->idle_timeout. */
static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
498
/* debugfs "rpa_timeout" — set the resolvable private address timeout. */
static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "rpa_timeout" — read hdev->rpa_timeout. */
static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");
529
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700530static int sniff_min_interval_set(void *data, u64 val)
531{
532 struct hci_dev *hdev = data;
533
534 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
535 return -EINVAL;
536
537 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700538 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700539 hci_dev_unlock(hdev);
540
541 return 0;
542}
543
544static int sniff_min_interval_get(void *data, u64 *val)
545{
546 struct hci_dev *hdev = data;
547
548 hci_dev_lock(hdev);
549 *val = hdev->sniff_min_interval;
550 hci_dev_unlock(hdev);
551
552 return 0;
553}
554
555DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
556 sniff_min_interval_set, "%llu\n");
557
558static int sniff_max_interval_set(void *data, u64 val)
559{
560 struct hci_dev *hdev = data;
561
562 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
563 return -EINVAL;
564
565 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700566 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700567 hci_dev_unlock(hdev);
568
569 return 0;
570}
571
572static int sniff_max_interval_get(void *data, u64 *val)
573{
574 struct hci_dev *hdev = data;
575
576 hci_dev_lock(hdev);
577 *val = hdev->sniff_max_interval;
578 hci_dev_unlock(hdev);
579
580 return 0;
581}
582
583DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
584 sniff_max_interval_set, "%llu\n");
585
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200586static int conn_info_min_age_set(void *data, u64 val)
587{
588 struct hci_dev *hdev = data;
589
590 if (val == 0 || val > hdev->conn_info_max_age)
591 return -EINVAL;
592
593 hci_dev_lock(hdev);
594 hdev->conn_info_min_age = val;
595 hci_dev_unlock(hdev);
596
597 return 0;
598}
599
600static int conn_info_min_age_get(void *data, u64 *val)
601{
602 struct hci_dev *hdev = data;
603
604 hci_dev_lock(hdev);
605 *val = hdev->conn_info_min_age;
606 hci_dev_unlock(hdev);
607
608 return 0;
609}
610
611DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
612 conn_info_min_age_set, "%llu\n");
613
614static int conn_info_max_age_set(void *data, u64 val)
615{
616 struct hci_dev *hdev = data;
617
618 if (val == 0 || val < hdev->conn_info_min_age)
619 return -EINVAL;
620
621 hci_dev_lock(hdev);
622 hdev->conn_info_max_age = val;
623 hci_dev_unlock(hdev);
624
625 return 0;
626}
627
628static int conn_info_max_age_get(void *data, u64 *val)
629{
630 struct hci_dev *hdev = data;
631
632 hci_dev_lock(hdev);
633 *val = hdev->conn_info_max_age;
634 hci_dev_unlock(hdev);
635
636 return 0;
637}
638
639DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
640 conn_info_max_age_set, "%llu\n");
641
Marcel Holtmannac345812014-02-23 12:44:25 -0800642static int identity_show(struct seq_file *f, void *p)
643{
644 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200645 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800646 u8 addr_type;
647
648 hci_dev_lock(hdev);
649
Johan Hedberga1f4c312014-02-27 14:05:41 +0200650 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800651
Johan Hedberga1f4c312014-02-27 14:05:41 +0200652 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800653 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800654
655 hci_dev_unlock(hdev);
656
657 return 0;
658}
659
660static int identity_open(struct inode *inode, struct file *file)
661{
662 return single_open(file, identity_show, inode->i_private);
663}
664
665static const struct file_operations identity_fops = {
666 .open = identity_open,
667 .read = seq_read,
668 .llseek = seq_lseek,
669 .release = single_release,
670};
671
/* debugfs "random_address" — print hdev->random_addr. */
static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
694
/* debugfs "static_address" — print hdev->static_addr. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
717
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800718static ssize_t force_static_address_read(struct file *file,
719 char __user *user_buf,
720 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700721{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800722 struct hci_dev *hdev = file->private_data;
723 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700724
Marcel Holtmann111902f2014-06-21 04:53:17 +0200725 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800726 buf[1] = '\n';
727 buf[2] = '\0';
728 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
729}
730
731static ssize_t force_static_address_write(struct file *file,
732 const char __user *user_buf,
733 size_t count, loff_t *ppos)
734{
735 struct hci_dev *hdev = file->private_data;
736 char buf[32];
737 size_t buf_size = min(count, (sizeof(buf)-1));
738 bool enable;
739
740 if (test_bit(HCI_UP, &hdev->flags))
741 return -EBUSY;
742
743 if (copy_from_user(buf, user_buf, buf_size))
744 return -EFAULT;
745
746 buf[buf_size] = '\0';
747 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700748 return -EINVAL;
749
Marcel Holtmann111902f2014-06-21 04:53:17 +0200750 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800751 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700752
Marcel Holtmann111902f2014-06-21 04:53:17 +0200753 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800754
755 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700756}
757
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800758static const struct file_operations force_static_address_fops = {
759 .open = simple_open,
760 .read = force_static_address_read,
761 .write = force_static_address_write,
762 .llseek = default_llseek,
763};
Marcel Holtmann92202182013-10-18 16:38:10 -0700764
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800765static int white_list_show(struct seq_file *f, void *ptr)
766{
767 struct hci_dev *hdev = f->private;
768 struct bdaddr_list *b;
769
770 hci_dev_lock(hdev);
771 list_for_each_entry(b, &hdev->le_white_list, list)
772 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
773 hci_dev_unlock(hdev);
774
775 return 0;
776}
777
778static int white_list_open(struct inode *inode, struct file *file)
779{
780 return single_open(file, white_list_show, inode->i_private);
781}
782
783static const struct file_operations white_list_fops = {
784 .open = white_list_open,
785 .read = seq_read,
786 .llseek = seq_lseek,
787 .release = single_release,
788};
789
Marcel Holtmann3698d702014-02-18 21:54:49 -0800790static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
791{
792 struct hci_dev *hdev = f->private;
Johan Hedbergadae20c2014-11-13 14:37:48 +0200793 struct smp_irk *irk;
Marcel Holtmann3698d702014-02-18 21:54:49 -0800794
Johan Hedbergadae20c2014-11-13 14:37:48 +0200795 rcu_read_lock();
796 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Marcel Holtmann3698d702014-02-18 21:54:49 -0800797 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
798 &irk->bdaddr, irk->addr_type,
799 16, irk->val, &irk->rpa);
800 }
Johan Hedbergadae20c2014-11-13 14:37:48 +0200801 rcu_read_unlock();
Marcel Holtmann3698d702014-02-18 21:54:49 -0800802
803 return 0;
804}
805
806static int identity_resolving_keys_open(struct inode *inode, struct file *file)
807{
808 return single_open(file, identity_resolving_keys_show,
809 inode->i_private);
810}
811
812static const struct file_operations identity_resolving_keys_fops = {
813 .open = identity_resolving_keys_open,
814 .read = seq_read,
815 .llseek = seq_lseek,
816 .release = single_release,
817};
818
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700819static int long_term_keys_show(struct seq_file *f, void *ptr)
820{
821 struct hci_dev *hdev = f->private;
Johan Hedberg970d0f12014-11-13 14:37:47 +0200822 struct smp_ltk *ltk;
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700823
Johan Hedberg970d0f12014-11-13 14:37:47 +0200824 rcu_read_lock();
825 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800826 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700827 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
828 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800829 __le64_to_cpu(ltk->rand), 16, ltk->val);
Johan Hedberg970d0f12014-11-13 14:37:47 +0200830 rcu_read_unlock();
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700831
832 return 0;
833}
834
835static int long_term_keys_open(struct inode *inode, struct file *file)
836{
837 return single_open(file, long_term_keys_show, inode->i_private);
838}
839
840static const struct file_operations long_term_keys_fops = {
841 .open = long_term_keys_open,
842 .read = seq_read,
843 .llseek = seq_lseek,
844 .release = single_release,
845};
846
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700847static int conn_min_interval_set(void *data, u64 val)
848{
849 struct hci_dev *hdev = data;
850
851 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
852 return -EINVAL;
853
854 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700855 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700856 hci_dev_unlock(hdev);
857
858 return 0;
859}
860
861static int conn_min_interval_get(void *data, u64 *val)
862{
863 struct hci_dev *hdev = data;
864
865 hci_dev_lock(hdev);
866 *val = hdev->le_conn_min_interval;
867 hci_dev_unlock(hdev);
868
869 return 0;
870}
871
872DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
873 conn_min_interval_set, "%llu\n");
874
875static int conn_max_interval_set(void *data, u64 val)
876{
877 struct hci_dev *hdev = data;
878
879 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
880 return -EINVAL;
881
882 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700883 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700884 hci_dev_unlock(hdev);
885
886 return 0;
887}
888
889static int conn_max_interval_get(void *data, u64 *val)
890{
891 struct hci_dev *hdev = data;
892
893 hci_dev_lock(hdev);
894 *val = hdev->le_conn_max_interval;
895 hci_dev_unlock(hdev);
896
897 return 0;
898}
899
900DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
901 conn_max_interval_set, "%llu\n");
902
/* debugfs "conn_latency" — set the LE connection latency; valid range
 * is [0, 0x01f3].
 */
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "conn_latency" — read hdev->le_conn_latency. */
static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");
930
/* debugfs "supervision_timeout" — set the LE supervision timeout; valid
 * range is [0x000a, 0x0c80].
 */
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "supervision_timeout" — read hdev->le_supv_timeout. */
static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
958
/* debugfs "adv_channel_map" — set the LE advertising channel map; a
 * 3-bit mask, so only values 0x01..0x07 are accepted.
 */
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "adv_channel_map" — read hdev->le_adv_channel_map. */
static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
986
Georg Lukas729a1052014-07-26 13:59:58 +0200987static int adv_min_interval_set(void *data, u64 val)
Jukka Rissanen89863102013-12-11 17:05:38 +0200988{
Georg Lukas729a1052014-07-26 13:59:58 +0200989 struct hci_dev *hdev = data;
Jukka Rissanen89863102013-12-11 17:05:38 +0200990
Georg Lukas729a1052014-07-26 13:59:58 +0200991 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
Jukka Rissanen89863102013-12-11 17:05:38 +0200992 return -EINVAL;
993
Andre Guedes7d474e02014-02-26 20:21:54 -0300994 hci_dev_lock(hdev);
Georg Lukas729a1052014-07-26 13:59:58 +0200995 hdev->le_adv_min_interval = val;
Andre Guedes7d474e02014-02-26 20:21:54 -0300996 hci_dev_unlock(hdev);
997
998 return 0;
999}
1000
Georg Lukas729a1052014-07-26 13:59:58 +02001001static int adv_min_interval_get(void *data, u64 *val)
Andre Guedes7d474e02014-02-26 20:21:54 -03001002{
Georg Lukas729a1052014-07-26 13:59:58 +02001003 struct hci_dev *hdev = data;
1004
1005 hci_dev_lock(hdev);
1006 *val = hdev->le_adv_min_interval;
1007 hci_dev_unlock(hdev);
1008
1009 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -03001010}
1011
/* debugfs file ops: expose le_adv_min_interval as a decimal u64. */
DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");
1014
1015static int adv_max_interval_set(void *data, u64 val)
Andre Guedes7d474e02014-02-26 20:21:54 -03001016{
Georg Lukas729a1052014-07-26 13:59:58 +02001017 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -03001018
Georg Lukas729a1052014-07-26 13:59:58 +02001019 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
Andre Guedes7d474e02014-02-26 20:21:54 -03001020 return -EINVAL;
1021
Georg Lukas729a1052014-07-26 13:59:58 +02001022 hci_dev_lock(hdev);
1023 hdev->le_adv_max_interval = val;
1024 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001025
Georg Lukas729a1052014-07-26 13:59:58 +02001026 return 0;
1027}
Andre Guedes7d474e02014-02-26 20:21:54 -03001028
Georg Lukas729a1052014-07-26 13:59:58 +02001029static int adv_max_interval_get(void *data, u64 *val)
1030{
1031 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -03001032
Georg Lukas729a1052014-07-26 13:59:58 +02001033 hci_dev_lock(hdev);
1034 *val = hdev->le_adv_max_interval;
1035 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001036
Georg Lukas729a1052014-07-26 13:59:58 +02001037 return 0;
1038}
Andre Guedes7d474e02014-02-26 20:21:54 -03001039
/* debugfs file ops: expose le_adv_max_interval as a decimal u64. */
DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");
Andre Guedes7d474e02014-02-26 20:21:54 -03001042
/* seq_file show handler for the debugfs "device_list" entry.
 *
 * Dumps, under the device lock, every entry on the controller's
 * whitelist (address and address type) followed by every stored LE
 * connection parameter entry (address, address type and auto_connect
 * value), one per line.
 */
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}
1060
/* open handler: bind the hci_dev stored in i_private to the seq_file. */
static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}
1065
/* Read-only single-shot seq_file ops for the "device_list" debugfs entry. */
static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1072
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073/* ---- HCI requests ---- */
1074
Johan Hedberg42c6b122013-03-05 20:37:49 +02001075static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001077 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078
1079 if (hdev->req_status == HCI_REQ_PEND) {
1080 hdev->req_result = result;
1081 hdev->req_status = HCI_REQ_DONE;
1082 wake_up_interruptible(&hdev->req_wait_q);
1083 }
1084}
1085
1086static void hci_req_cancel(struct hci_dev *hdev, int err)
1087{
1088 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1089
1090 if (hdev->req_status == HCI_REQ_PEND) {
1091 hdev->req_result = err;
1092 hdev->req_status = HCI_REQ_CANCELED;
1093 wake_up_interruptible(&hdev->req_wait_q);
1094 }
1095}
1096
/* Take ownership of the last received HCI event (hdev->recv_evt) and
 * validate that it matches what the synchronous command path expects.
 *
 * If @event is non-zero, the skb is returned when its event code equals
 * @event. Otherwise the skb must be a Command Complete event for
 * @opcode. On any mismatch or truncation the skb is freed and
 * ERR_PTR(-ENODATA) is returned, so the caller never owns a bad skb.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach recv_evt under the lock so no one else can free or
	 * replace it while we examine it.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event code; no further parsing */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	/* Only hand back the skb if it completes the expected opcode */
	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1151
/* Send a single HCI command and sleep until it completes, is canceled,
 * a signal arrives, or @timeout (in jiffies) expires.
 *
 * @event selects which event terminates the command (0 means the normal
 * Command Complete). Returns the completing event skb on success or an
 * ERR_PTR on failure. Must be called from process context.
 *
 * Note the ordering: the task is queued on req_wait_q and set to
 * TASK_INTERRUPTIBLE *before* hci_req_run() so a fast completion
 * cannot be missed.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		/* Undo the wait setup before bailing out */
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	/* Map the request status set by the completion/cancel paths to
	 * an errno; falling through to default means we timed out.
	 */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1208
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case of
 * waiting for the plain Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1215
/* Execute request and wait for completion.
 *
 * @func builds the request (it may queue zero or more commands); we then
 * run it and sleep until hci_req_sync_complete()/hci_req_cancel() wakes
 * us, a signal arrives, or @timeout (jiffies) expires. Returns 0 or a
 * negative errno. Caller must hold the request lock (see hci_req_sync).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	/* Queue ourselves on the wait queue before running the request so
	 * a completion arriving immediately cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	/* Translate the status left by the completion/cancel callbacks;
	 * anything else means the wait timed out.
	 */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1282
/* Public synchronous request entry point: refuse when the device is not
 * up, and serialize all requests via the request lock around
 * __hci_req_sync(). Returns 0 or a negative errno.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
1300
Johan Hedberg42c6b122013-03-05 20:37:49 +02001301static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001303 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304
1305 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001306 set_bit(HCI_RESET, &req->hdev->flags);
1307 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308}
1309
Johan Hedberg42c6b122013-03-05 20:37:49 +02001310static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001312 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001313
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001315 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001317 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001318 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001319
1320 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001321 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322}
1323
/* Request builder for AMP controller bring-up: select block-based flow
 * control and queue the reads that describe the AMP controller.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1349
/* Stage-1 init request: optionally reset the controller, then dispatch
 * to the BR/EDR or AMP specific bring-up based on the device type.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1374
/* Stage-2 BR/EDR setup: queue the common BR/EDR configuration reads,
 * clear the event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
1406
/* Stage-2 LE setup: queue the LE capability reads, clear the white
 * list, and mark LE enabled on LE-only controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1430
1431static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1432{
1433 if (lmp_ext_inq_capable(hdev))
1434 return 0x02;
1435
1436 if (lmp_inq_rssi_capable(hdev))
1437 return 0x01;
1438
1439 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1440 hdev->lmp_subver == 0x0757)
1441 return 0x01;
1442
1443 if (hdev->manufacturer == 15) {
1444 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1445 return 0x01;
1446 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1447 return 0x01;
1448 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1449 return 0x01;
1450 }
1451
1452 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1453 hdev->lmp_subver == 0x1805)
1454 return 0x01;
1455
1456 return 0x00;
1457}
1458
Johan Hedberg42c6b122013-03-05 20:37:49 +02001459static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001460{
1461 u8 mode;
1462
Johan Hedberg42c6b122013-03-05 20:37:49 +02001463 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001464
Johan Hedberg42c6b122013-03-05 20:37:49 +02001465 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001466}
1467
/* Build and queue the Set Event Mask command, enabling exactly the
 * events that match the controller's advertised capabilities. LE-only
 * controllers get a reduced mask built from scratch.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1544
/* Stage-2 init request: run the BR/EDR and/or LE setup helpers and
 * queue the capability-dependent configuration commands (SSP/EIR,
 * inquiry mode, TX power, extended features, link-level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP supported but not enabled by the host:
			 * make sure any stale EIR data is cleared.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1606
Johan Hedberg42c6b122013-03-05 20:37:49 +02001607static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001608{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001609 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001610 struct hci_cp_write_def_link_policy cp;
1611 u16 link_policy = 0;
1612
1613 if (lmp_rswitch_capable(hdev))
1614 link_policy |= HCI_LP_RSWITCH;
1615 if (lmp_hold_capable(hdev))
1616 link_policy |= HCI_LP_HOLD;
1617 if (lmp_sniff_capable(hdev))
1618 link_policy |= HCI_LP_SNIFF;
1619 if (lmp_park_capable(hdev))
1620 link_policy |= HCI_LP_PARK;
1621
1622 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001623 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001624}
1625
/* Queue a Write LE Host Supported command when the host's desired LE
 * setting (from HCI_LE_ENABLED) differs from what the controller
 * currently reports. Simultaneous LE+BR/EDR is always set to 0x00.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1646
/* Build and queue the Set Event Mask Page 2 command for features whose
 * events live on the second event mask page (CSB roles, authenticated
 * payload timeout).
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1678
/* Third stage of controller initialization.
 *
 * Configures optional BR/EDR features and the LE event mask, with each
 * command gated on the supported-commands bit mask or LMP/LE feature
 * bits the controller reported during the earlier init stages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy Settings is only sent when supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		/* Enable the four lowest bits of the LE event mask by
		 * default; further bits are enabled below depending on
		 * the controller's capabilities.
		 */
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1781
/* Fourth and final stage of controller initialization.
 *
 * Enables features that depend on information gathered in the previous
 * stages: each command is sent only when the matching bit in the
 * supported-commands mask (or the relevant LMP feature) is present.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1809
/* Full power-on initialization for a controller.
 *
 * Runs the init stages synchronously (stage 1 for all controller
 * types, stages 2-4 for BR/EDR/LE only) and, during the initial
 * HCI_SETUP phase, creates the debugfs entries matching the
 * controller's capabilities. Returns 0 on success or the negative
 * error from the first failing request.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to all BR/EDR/LE controllers */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* Entries gated on BR/EDR support */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Entries gated on Secure Simple Pairing support */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
		if (lmp_le_capable(hdev))
			debugfs_create_file("force_lesc_support", 0644,
					    hdev->debugfs, hdev,
					    &force_lesc_support_fops);
	}

	/* Entries gated on sniff mode support */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* Entries gated on LE support; SMP registration happens here too */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}
1952
/* Minimal init sequence used for unconfigured controllers: optional
 * reset, read the local version information and - only when the driver
 * provides a set_bdaddr callback - read the current BD address.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset (skipped for controllers that reset on close instead) */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1970
1971static int __hci_unconf_init(struct hci_dev *hdev)
1972{
1973 int err;
1974
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001975 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1976 return 0;
1977
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001978 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1979 if (err < 0)
1980 return err;
1981
1982 return 0;
1983}
1984
Johan Hedberg42c6b122013-03-05 20:37:49 +02001985static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986{
1987 __u8 scan = opt;
1988
Johan Hedberg42c6b122013-03-05 20:37:49 +02001989 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
1991 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001992 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993}
1994
Johan Hedberg42c6b122013-03-05 20:37:49 +02001995static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996{
1997 __u8 auth = opt;
1998
Johan Hedberg42c6b122013-03-05 20:37:49 +02001999 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000
2001 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02002002 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003}
2004
Johan Hedberg42c6b122013-03-05 20:37:49 +02002005static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006{
2007 __u8 encrypt = opt;
2008
Johan Hedberg42c6b122013-03-05 20:37:49 +02002009 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002011 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02002012 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013}
2014
Johan Hedberg42c6b122013-03-05 20:37:49 +02002015static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002016{
2017 __le16 policy = cpu_to_le16(opt);
2018
Johan Hedberg42c6b122013-03-05 20:37:49 +02002019 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002020
2021 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02002022 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002023}
2024
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002025/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 * Device is held on return. */
2027struct hci_dev *hci_dev_get(int index)
2028{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002029 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030
2031 BT_DBG("%d", index);
2032
2033 if (index < 0)
2034 return NULL;
2035
2036 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002037 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 if (d->id == index) {
2039 hdev = hci_dev_hold(d);
2040 break;
2041 }
2042 }
2043 read_unlock(&hci_dev_list_lock);
2044 return hdev;
2045}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
2047/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02002048
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002049bool hci_discovery_active(struct hci_dev *hdev)
2050{
2051 struct discovery_state *discov = &hdev->discovery;
2052
Andre Guedes6fbe1952012-02-03 17:47:58 -03002053 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03002054 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03002055 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002056 return true;
2057
Andre Guedes6fbe1952012-02-03 17:47:58 -03002058 default:
2059 return false;
2060 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002061}
2062
Johan Hedbergff9ef572012-01-04 14:23:45 +02002063void hci_discovery_set_state(struct hci_dev *hdev, int state)
2064{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002065 int old_state = hdev->discovery.state;
2066
Johan Hedbergff9ef572012-01-04 14:23:45 +02002067 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2068
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002069 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002070 return;
2071
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002072 hdev->discovery.state = state;
2073
Johan Hedbergff9ef572012-01-04 14:23:45 +02002074 switch (state) {
2075 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002076 hci_update_background_scan(hdev);
2077
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002078 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002079 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002080 break;
2081 case DISCOVERY_STARTING:
2082 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002083 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002084 mgmt_discovering(hdev, 1);
2085 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002086 case DISCOVERY_RESOLVING:
2087 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002088 case DISCOVERY_STOPPING:
2089 break;
2090 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002091}
2092
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002093void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094{
Johan Hedberg30883512012-01-04 14:16:21 +02002095 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002096 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097
Johan Hedberg561aafb2012-01-04 13:31:59 +02002098 list_for_each_entry_safe(p, n, &cache->all, all) {
2099 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002100 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002102
2103 INIT_LIST_HEAD(&cache->unknown);
2104 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105}
2106
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002107struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2108 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109{
Johan Hedberg30883512012-01-04 14:16:21 +02002110 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 struct inquiry_entry *e;
2112
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002113 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
Johan Hedberg561aafb2012-01-04 13:31:59 +02002115 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002117 return e;
2118 }
2119
2120 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121}
2122
Johan Hedberg561aafb2012-01-04 13:31:59 +02002123struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002124 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002125{
Johan Hedberg30883512012-01-04 14:16:21 +02002126 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002127 struct inquiry_entry *e;
2128
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002129 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002130
2131 list_for_each_entry(e, &cache->unknown, list) {
2132 if (!bacmp(&e->data.bdaddr, bdaddr))
2133 return e;
2134 }
2135
2136 return NULL;
2137}
2138
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002139struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002140 bdaddr_t *bdaddr,
2141 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002142{
2143 struct discovery_state *cache = &hdev->discovery;
2144 struct inquiry_entry *e;
2145
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002146 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002147
2148 list_for_each_entry(e, &cache->resolve, list) {
2149 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2150 return e;
2151 if (!bacmp(&e->data.bdaddr, bdaddr))
2152 return e;
2153 }
2154
2155 return NULL;
2156}
2157
/* Re-insert @ie into the name-resolve list at its sorted position.
 *
 * The list is kept ordered by ascending |RSSI| so that devices with a
 * stronger signal get their names resolved first; entries whose
 * resolution is already pending (NAME_PENDING) are skipped over and
 * stay ahead of @ie.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the entry is re-added at the right spot below */
	list_del(&ie->list);

	/* Walk until the first non-pending entry with equal or weaker
	 * signal; @pos trails behind as the insertion point.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
2176
/* Add or refresh the inquiry cache entry for the device described by
 * @data, and compute the MGMT_DEV_FOUND_* flags to report alongside
 * the Device Found event.
 *
 * @name_known: whether the remote name is already known, so no name
 *              resolution needs to be requested for this device.
 *
 * Returns the accumulated flags: LEGACY_PAIRING when either the new
 * or the cached data indicates no SSP support, and CONFIRM_NAME when
 * the name is still unknown (or the entry could not be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Any stored OOB data for this address is now stale */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change while a name request is outstanding
		 * re-sorts the entry in the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* No memory for caching: still ask userspace to
		 * confirm the name.
		 */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* A now-known name moves the entry off the unknown/resolve list */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2238
2239static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2240{
Johan Hedberg30883512012-01-04 14:16:21 +02002241 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 struct inquiry_info *info = (struct inquiry_info *) buf;
2243 struct inquiry_entry *e;
2244 int copied = 0;
2245
Johan Hedberg561aafb2012-01-04 13:31:59 +02002246 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002248
2249 if (copied >= num)
2250 break;
2251
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 bacpy(&info->bdaddr, &data->bdaddr);
2253 info->pscan_rep_mode = data->pscan_rep_mode;
2254 info->pscan_period_mode = data->pscan_period_mode;
2255 info->pscan_mode = data->pscan_mode;
2256 memcpy(info->dev_class, data->dev_class, 3);
2257 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002258
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002260 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 }
2262
2263 BT_DBG("cache %p, copied %d", cache, copied);
2264 return copied;
2265}
2266
Johan Hedberg42c6b122013-03-05 20:37:49 +02002267static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268{
2269 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002270 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 struct hci_cp_inquiry cp;
2272
2273 BT_DBG("%s", hdev->name);
2274
2275 if (test_bit(HCI_INQUIRY, &hdev->flags))
2276 return;
2277
2278 /* Start Inquiry */
2279 memcpy(&cp.lap, &ir->lap, 3);
2280 cp.length = ir->length;
2281 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002282 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283}
2284
2285int hci_inquiry(void __user *arg)
2286{
2287 __u8 __user *ptr = arg;
2288 struct hci_inquiry_req ir;
2289 struct hci_dev *hdev;
2290 int err = 0, do_inquiry = 0, max_rsp;
2291 long timeo;
2292 __u8 *buf;
2293
2294 if (copy_from_user(&ir, ptr, sizeof(ir)))
2295 return -EFAULT;
2296
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002297 hdev = hci_dev_get(ir.dev_id);
2298 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 return -ENODEV;
2300
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002301 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2302 err = -EBUSY;
2303 goto done;
2304 }
2305
Marcel Holtmann4a964402014-07-02 19:10:33 +02002306 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002307 err = -EOPNOTSUPP;
2308 goto done;
2309 }
2310
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002311 if (hdev->dev_type != HCI_BREDR) {
2312 err = -EOPNOTSUPP;
2313 goto done;
2314 }
2315
Johan Hedberg56f87902013-10-02 13:43:13 +03002316 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2317 err = -EOPNOTSUPP;
2318 goto done;
2319 }
2320
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002321 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002322 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002323 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002324 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 do_inquiry = 1;
2326 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002327 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328
Marcel Holtmann04837f62006-07-03 10:02:33 +02002329 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002330
2331 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002332 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2333 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002334 if (err < 0)
2335 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002336
2337 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2338 * cleared). If it is interrupted by a signal, return -EINTR.
2339 */
NeilBrown74316202014-07-07 15:16:04 +10002340 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002341 TASK_INTERRUPTIBLE))
2342 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002345 /* for unlimited number of responses we will use buffer with
2346 * 255 entries
2347 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2349
2350 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2351 * copy it to the user space.
2352 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002353 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002354 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355 err = -ENOMEM;
2356 goto done;
2357 }
2358
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002359 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002361 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362
2363 BT_DBG("num_rsp %d", ir.num_rsp);
2364
2365 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2366 ptr += sizeof(ir);
2367 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002368 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002370 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 err = -EFAULT;
2372
2373 kfree(buf);
2374
2375done:
2376 hci_dev_put(hdev);
2377 return err;
2378}
2379
/* Bring an HCI device up: call the transport open(), run the optional
 * driver setup() stage and the HCI init sequence, and on success mark
 * the device as HCI_UP and notify listeners.
 *
 * The whole procedure runs under the request lock. On any failure after
 * the transport has been opened, the work queues are flushed, pending
 * queues purged and the transport closed again, so the device is left
 * fully down. Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to open a device that is already being unregistered */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the underlying transport first; everything below assumes
	 * the driver can accept commands.
	 */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Run the full HCI init sequence only for configured
		 * devices that are not in user channel operation.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		/* Keep only the HCI_RAW bit across the failed open */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2523
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002524/* ---- HCI ioctl helpers ---- */
2525
/* ioctl entry point for powering on device index @dev.
 *
 * Performs the legacy-interface policy checks (unconfigured devices may
 * only come up as user channel, HCI_BONDABLE handling for non-mgmt
 * users) and then delegates to hci_dev_do_open(). Returns 0 on success
 * or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2580
Johan Hedbergd7347f32014-07-04 12:37:23 +03002581/* This function requires the caller holds hdev->lock */
2582static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2583{
2584 struct hci_conn_params *p;
2585
Johan Hedbergf161dd42014-08-15 21:06:54 +03002586 list_for_each_entry(p, &hdev->le_conn_params, list) {
2587 if (p->conn) {
2588 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002589 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002590 p->conn = NULL;
2591 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002592 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002593 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002594
2595 BT_DBG("All LE pending actions cleared");
2596}
2597
/* Bring an HCI device down: cancel all pending work, flush caches and
 * connections, optionally send a reset to the controller, drop all
 * queues and finally call the transport close().
 *
 * The ordering below is deliberate — work items are cancelled/drained
 * before the queues they feed are purged, and the transport is closed
 * only after all queues are empty. Runs under the request lock and
 * releases the reference taken at open time. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* If the device was never up there is nothing to tear down */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	/* Only report powered-down over mgmt when this was not an
	 * automatic power-off of a BR/EDR controller.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags, keeping only the HCI_RAW bit */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2703
2704int hci_dev_close(__u16 dev)
2705{
2706 struct hci_dev *hdev;
2707 int err;
2708
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002709 hdev = hci_dev_get(dev);
2710 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002712
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002713 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2714 err = -EBUSY;
2715 goto done;
2716 }
2717
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002718 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2719 cancel_delayed_work(&hdev->power_off);
2720
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002722
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002723done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 hci_dev_put(hdev);
2725 return err;
2726}
2727
/* ioctl entry point for resetting device index @dev.
 *
 * Requires the device to be up and neither in user channel operation
 * nor unconfigured. Purges all queues, flushes the inquiry cache and
 * connection hash, and issues a synchronous HCI reset request.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the flow-control counters before issuing the reset */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2781
2782int hci_dev_reset_stat(__u16 dev)
2783{
2784 struct hci_dev *hdev;
2785 int ret = 0;
2786
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002787 hdev = hci_dev_get(dev);
2788 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 return -ENODEV;
2790
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002791 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2792 ret = -EBUSY;
2793 goto done;
2794 }
2795
Marcel Holtmann4a964402014-07-02 19:10:33 +02002796 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002797 ret = -EOPNOTSUPP;
2798 goto done;
2799 }
2800
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2802
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002803done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 return ret;
2806}
2807
Johan Hedberg123abc02014-07-10 12:09:07 +03002808static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2809{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002810 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002811
2812 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2813
2814 if ((scan & SCAN_PAGE))
2815 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2816 &hdev->dev_flags);
2817 else
2818 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2819 &hdev->dev_flags);
2820
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002821 if ((scan & SCAN_INQUIRY)) {
2822 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2823 &hdev->dev_flags);
2824 } else {
2825 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2826 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2827 &hdev->dev_flags);
2828 }
2829
Johan Hedberg123abc02014-07-10 12:09:07 +03002830 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2831 return;
2832
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002833 if (conn_changed || discov_changed) {
2834 /* In case this was disabled through mgmt */
2835 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2836
2837 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2838 mgmt_update_adv_data(hdev);
2839
Johan Hedberg123abc02014-07-10 12:09:07 +03002840 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002841 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002842}
2843
/* Dispatcher for the legacy HCISET* ioctls.
 *
 * Copies a struct hci_dev_req from user space, validates that the
 * target device is a BR/EDR controller that is neither in user channel
 * operation nor unconfigured and has BR/EDR enabled, then applies the
 * requested setting. Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two 16-bit values: the MTU
	 * in the upper half and the packet count in the lower half.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2945
2946int hci_get_dev_list(void __user *arg)
2947{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002948 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949 struct hci_dev_list_req *dl;
2950 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 int n = 0, size, err;
2952 __u16 dev_num;
2953
2954 if (get_user(dev_num, (__u16 __user *) arg))
2955 return -EFAULT;
2956
2957 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2958 return -EINVAL;
2959
2960 size = sizeof(*dl) + dev_num * sizeof(*dr);
2961
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002962 dl = kzalloc(size, GFP_KERNEL);
2963 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 return -ENOMEM;
2965
2966 dr = dl->dev_req;
2967
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002968 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002969 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002970 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002971
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002972 /* When the auto-off is configured it means the transport
2973 * is running, but in that case still indicate that the
2974 * device is actually down.
2975 */
2976 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2977 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002978
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002980 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002981
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 if (++n >= dev_num)
2983 break;
2984 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002985 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986
2987 dl->dev_num = n;
2988 size = sizeof(*dl) + n * sizeof(*dr);
2989
2990 err = copy_to_user(arg, dl, size);
2991 kfree(dl);
2992
2993 return err ? -EFAULT : 0;
2994}
2995
2996int hci_get_dev_info(void __user *arg)
2997{
2998 struct hci_dev *hdev;
2999 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003000 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001 int err = 0;
3002
3003 if (copy_from_user(&di, arg, sizeof(di)))
3004 return -EFAULT;
3005
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003006 hdev = hci_dev_get(di.dev_id);
3007 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008 return -ENODEV;
3009
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003010 /* When the auto-off is configured it means the transport
3011 * is running, but in that case still indicate that the
3012 * device is actually down.
3013 */
3014 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3015 flags = hdev->flags & ~BIT(HCI_UP);
3016 else
3017 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02003018
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019 strcpy(di.name, hdev->name);
3020 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07003021 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02003022 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03003024 if (lmp_bredr_capable(hdev)) {
3025 di.acl_mtu = hdev->acl_mtu;
3026 di.acl_pkts = hdev->acl_pkts;
3027 di.sco_mtu = hdev->sco_mtu;
3028 di.sco_pkts = hdev->sco_pkts;
3029 } else {
3030 di.acl_mtu = hdev->le_mtu;
3031 di.acl_pkts = hdev->le_pkts;
3032 di.sco_mtu = 0;
3033 di.sco_pkts = 0;
3034 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035 di.link_policy = hdev->link_policy;
3036 di.link_mode = hdev->link_mode;
3037
3038 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3039 memcpy(&di.features, &hdev->features, sizeof(di.features));
3040
3041 if (copy_to_user(arg, &di, sizeof(di)))
3042 err = -EFAULT;
3043
3044 hci_dev_put(hdev);
3045
3046 return err;
3047}
3048
3049/* ---- Interface to HCI drivers ---- */
3050
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003051static int hci_rfkill_set_block(void *data, bool blocked)
3052{
3053 struct hci_dev *hdev = data;
3054
3055 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3056
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003057 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3058 return -EBUSY;
3059
Johan Hedberg5e130362013-09-13 08:58:17 +03003060 if (blocked) {
3061 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003062 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3063 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003064 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003065 } else {
3066 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003067 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003068
3069 return 0;
3070}
3071
/* rfkill operations registered for every HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3075
/* Work item that powers a device on (queued e.g. from mgmt or at
 * registration time). Opens the device via hci_dev_do_open(), re-checks
 * error conditions that the setup stage deliberately ignored, arms the
 * auto-off timer when requested, and emits the appropriate mgmt index
 * events when leaving the HCI_SETUP or HCI_CONFIG stage.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3136
3137static void hci_power_off(struct work_struct *work)
3138{
Johan Hedberg32435532011-11-07 22:16:04 +02003139 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003140 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003141
3142 BT_DBG("%s", hdev->name);
3143
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003144 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003145}
3146
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003147static void hci_discov_off(struct work_struct *work)
3148{
3149 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003150
3151 hdev = container_of(work, struct hci_dev, discov_off.work);
3152
3153 BT_DBG("%s", hdev->name);
3154
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003155 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003156}
3157
Johan Hedberg35f74982014-02-18 17:14:32 +02003158void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003159{
Johan Hedberg48210022013-01-27 00:31:28 +02003160 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003161
Johan Hedberg48210022013-01-27 00:31:28 +02003162 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3163 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003164 kfree(uuid);
3165 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003166}
3167
Johan Hedberg35f74982014-02-18 17:14:32 +02003168void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003169{
Johan Hedberg0378b592014-11-19 15:22:22 +02003170 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003171
Johan Hedberg0378b592014-11-19 15:22:22 +02003172 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3173 list_del_rcu(&key->list);
3174 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003175 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003176}
3177
Johan Hedberg35f74982014-02-18 17:14:32 +02003178void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003179{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003180 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003181
Johan Hedberg970d0f12014-11-13 14:37:47 +02003182 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3183 list_del_rcu(&k->list);
3184 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003185 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003186}
3187
Johan Hedberg970c4e42014-02-18 10:19:33 +02003188void hci_smp_irks_clear(struct hci_dev *hdev)
3189{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003190 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003191
Johan Hedbergadae20c2014-11-13 14:37:48 +02003192 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3193 list_del_rcu(&k->list);
3194 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003195 }
3196}
3197
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003198struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3199{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003200 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003201
Johan Hedberg0378b592014-11-19 15:22:22 +02003202 rcu_read_lock();
3203 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3204 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3205 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003206 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02003207 }
3208 }
3209 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003210
3211 return NULL;
3212}
3213
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently or kept only for the lifetime of the connection.
 *
 * @hdev:         controller the key belongs to (currently unused,
 *                kept for symmetry with the other key helpers)
 * @conn:         connection the key was generated on, or NULL when the
 *                key arrived without a connection (security mode 3)
 * @key_type:     link key type of the new key
 * @old_key_type: type of any previously stored key, 0xff if none
 *
 * Returns true if the key should be stored persistently.
 *
 * The checks below form an ordered policy chain; the first matching
 * rule decides, so their order must not be changed.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
3253
Johan Hedberge804d252014-07-16 11:42:28 +03003254static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003255{
Johan Hedberge804d252014-07-16 11:42:28 +03003256 if (type == SMP_LTK)
3257 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003258
Johan Hedberge804d252014-07-16 11:42:28 +03003259 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003260}
3261
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003262struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3263 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003264{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003265 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003266
Johan Hedberg970d0f12014-11-13 14:37:47 +02003267 rcu_read_lock();
3268 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03003269 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3270 continue;
3271
Johan Hedberg923e2412014-12-03 12:43:39 +02003272 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02003273 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003274 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003275 }
3276 }
3277 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003278
3279 return NULL;
3280}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003281
/* Resolve a Resolvable Private Address (RPA) to a stored IRK.
 *
 * Two passes: first check whether some IRK already has this exact RPA
 * cached from an earlier resolution; only if that fails, run the
 * (more expensive) cryptographic match against every stored IRK value.
 *
 * Returns the matching IRK or NULL if the RPA cannot be resolved.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	/* Fast path: RPA was resolved before and is still cached. */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	/* Slow path: try resolving the RPA against each IRK value. */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path.
			 * NOTE(review): this writes to the entry while
			 * holding only the RCU read lock - presumably
			 * serialized by hdev->lock in callers; confirm.
			 */
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
3305
3306struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3307 u8 addr_type)
3308{
3309 struct smp_irk *irk;
3310
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003311 /* Identity Address must be public or static random */
3312 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3313 return NULL;
3314
Johan Hedbergadae20c2014-11-13 14:37:48 +02003315 rcu_read_lock();
3316 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003317 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003318 bacmp(bdaddr, &irk->bdaddr) == 0) {
3319 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003320 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003321 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003322 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003323 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003324
3325 return NULL;
3326}
3327
/* Store (or update) a BR/EDR link key for a remote device.
 *
 * @hdev:       controller the key belongs to
 * @conn:       connection the key was created on, may be NULL
 * @bdaddr:     remote device address
 * @val:        the HCI_LINK_KEY_SIZE byte key value
 * @type:       link key type as reported by the controller
 * @pin_len:    PIN length used during pairing
 * @persistent: out parameter (may be NULL); set to whether the key
 *              should be stored persistently per hci_persistent_key()
 *
 * An existing entry for the address is reused and overwritten;
 * otherwise a new entry is allocated and added to hdev->link_keys.
 * Returns the stored entry, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		/* Reuse the existing entry; remember its type so the
		 * workaround and persistence checks below can use it. */
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff means "no previous key" to the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3374
Johan Hedbergca9142b2014-02-19 14:57:44 +02003375struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003376 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003377 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003378{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003379 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003380 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003381
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003382 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003383 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003384 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003385 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003386 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003387 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003388 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003389 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003390 }
3391
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003392 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003393 key->bdaddr_type = addr_type;
3394 memcpy(key->val, tk, sizeof(key->val));
3395 key->authenticated = authenticated;
3396 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003397 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003398 key->enc_size = enc_size;
3399 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003400
Johan Hedbergca9142b2014-02-19 14:57:44 +02003401 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003402}
3403
Johan Hedbergca9142b2014-02-19 14:57:44 +02003404struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3405 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003406{
3407 struct smp_irk *irk;
3408
3409 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3410 if (!irk) {
3411 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3412 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003413 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003414
3415 bacpy(&irk->bdaddr, bdaddr);
3416 irk->addr_type = addr_type;
3417
Johan Hedbergadae20c2014-11-13 14:37:48 +02003418 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003419 }
3420
3421 memcpy(irk->val, val, 16);
3422 bacpy(&irk->rpa, rpa);
3423
Johan Hedbergca9142b2014-02-19 14:57:44 +02003424 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003425}
3426
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003427int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3428{
3429 struct link_key *key;
3430
3431 key = hci_find_link_key(hdev, bdaddr);
3432 if (!key)
3433 return -ENOENT;
3434
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003435 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003436
Johan Hedberg0378b592014-11-19 15:22:22 +02003437 list_del_rcu(&key->list);
3438 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003439
3440 return 0;
3441}
3442
Johan Hedberge0b2b272014-02-18 17:14:31 +02003443int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003444{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003445 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003446 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003447
Johan Hedberg970d0f12014-11-13 14:37:47 +02003448 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003449 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003450 continue;
3451
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003452 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003453
Johan Hedberg970d0f12014-11-13 14:37:47 +02003454 list_del_rcu(&k->list);
3455 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003456 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003457 }
3458
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003459 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003460}
3461
Johan Hedberga7ec7332014-02-18 17:14:35 +02003462void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3463{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003464 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02003465
Johan Hedbergadae20c2014-11-13 14:37:48 +02003466 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003467 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3468 continue;
3469
3470 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3471
Johan Hedbergadae20c2014-11-13 14:37:48 +02003472 list_del_rcu(&k->list);
3473 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02003474 }
3475}
3476
Ville Tervo6bd32322011-02-16 16:32:41 +02003477/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003478static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003479{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003480 struct hci_dev *hdev = container_of(work, struct hci_dev,
3481 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003482
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003483 if (hdev->sent_cmd) {
3484 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3485 u16 opcode = __le16_to_cpu(sent->opcode);
3486
3487 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3488 } else {
3489 BT_ERR("%s command tx timeout", hdev->name);
3490 }
3491
Ville Tervo6bd32322011-02-16 16:32:41 +02003492 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003493 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003494}
3495
Szymon Janc2763eda2011-03-22 13:12:22 +01003496struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01003497 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003498{
3499 struct oob_data *data;
3500
Johan Hedberg6928a922014-10-26 20:46:09 +01003501 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3502 if (bacmp(bdaddr, &data->bdaddr) != 0)
3503 continue;
3504 if (data->bdaddr_type != bdaddr_type)
3505 continue;
3506 return data;
3507 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003508
3509 return NULL;
3510}
3511
Johan Hedberg6928a922014-10-26 20:46:09 +01003512int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3513 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003514{
3515 struct oob_data *data;
3516
Johan Hedberg6928a922014-10-26 20:46:09 +01003517 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003518 if (!data)
3519 return -ENOENT;
3520
Johan Hedberg6928a922014-10-26 20:46:09 +01003521 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003522
3523 list_del(&data->list);
3524 kfree(data);
3525
3526 return 0;
3527}
3528
Johan Hedberg35f74982014-02-18 17:14:32 +02003529void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003530{
3531 struct oob_data *data, *n;
3532
3533 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3534 list_del(&data->list);
3535 kfree(data);
3536 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003537}
3538
Marcel Holtmann07988722014-01-10 02:07:29 -08003539int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01003540 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003541 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01003542{
3543 struct oob_data *data;
3544
Johan Hedberg6928a922014-10-26 20:46:09 +01003545 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003546 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003547 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003548 if (!data)
3549 return -ENOMEM;
3550
3551 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01003552 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01003553 list_add(&data->list, &hdev->remote_oob_data);
3554 }
3555
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003556 if (hash192 && rand192) {
3557 memcpy(data->hash192, hash192, sizeof(data->hash192));
3558 memcpy(data->rand192, rand192, sizeof(data->rand192));
3559 } else {
3560 memset(data->hash192, 0, sizeof(data->hash192));
3561 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003562 }
3563
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003564 if (hash256 && rand256) {
3565 memcpy(data->hash256, hash256, sizeof(data->hash256));
3566 memcpy(data->rand256, rand256, sizeof(data->rand256));
3567 } else {
3568 memset(data->hash256, 0, sizeof(data->hash256));
3569 memset(data->rand256, 0, sizeof(data->rand256));
3570 }
Marcel Holtmann07988722014-01-10 02:07:29 -08003571
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003572 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003573
3574 return 0;
3575}
3576
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003577struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003578 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003579{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003580 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003581
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003582 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003583 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003584 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003585 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003586
3587 return NULL;
3588}
3589
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003590void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003591{
3592 struct list_head *p, *n;
3593
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003594 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003595 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003596
3597 list_del(p);
3598 kfree(b);
3599 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003600}
3601
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003602int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003603{
3604 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003605
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003606 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003607 return -EBADF;
3608
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003609 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003610 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003611
Johan Hedberg27f70f32014-07-21 10:50:06 +03003612 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003613 if (!entry)
3614 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003615
3616 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003617 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003618
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003619 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003620
3621 return 0;
3622}
3623
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003624int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003625{
3626 struct bdaddr_list *entry;
3627
Johan Hedberg35f74982014-02-18 17:14:32 +02003628 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003629 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003630 return 0;
3631 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003632
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003633 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003634 if (!entry)
3635 return -ENOENT;
3636
3637 list_del(&entry->list);
3638 kfree(entry);
3639
3640 return 0;
3641}
3642
Andre Guedes15819a72014-02-03 13:56:18 -03003643/* This function requires the caller holds hdev->lock */
3644struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3645 bdaddr_t *addr, u8 addr_type)
3646{
3647 struct hci_conn_params *params;
3648
Johan Hedberg738f6182014-07-03 19:33:51 +03003649 /* The conn params list only contains identity addresses */
3650 if (!hci_is_identity_address(addr, addr_type))
3651 return NULL;
3652
Andre Guedes15819a72014-02-03 13:56:18 -03003653 list_for_each_entry(params, &hdev->le_conn_params, list) {
3654 if (bacmp(&params->addr, addr) == 0 &&
3655 params->addr_type == addr_type) {
3656 return params;
3657 }
3658 }
3659
3660 return NULL;
3661}
3662
Andre Guedescef952c2014-02-26 20:21:49 -03003663static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3664{
3665 struct hci_conn *conn;
3666
3667 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3668 if (!conn)
3669 return false;
3670
3671 if (conn->dst_type != type)
3672 return false;
3673
3674 if (conn->state != BT_CONNECTED)
3675 return false;
3676
3677 return true;
3678}
3679
Andre Guedes15819a72014-02-03 13:56:18 -03003680/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003681struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3682 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003683{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003684 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003685
Johan Hedberg738f6182014-07-03 19:33:51 +03003686 /* The list only contains identity addresses */
3687 if (!hci_is_identity_address(addr, addr_type))
3688 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003689
Johan Hedberg501f8822014-07-04 12:37:26 +03003690 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003691 if (bacmp(&param->addr, addr) == 0 &&
3692 param->addr_type == addr_type)
3693 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003694 }
3695
3696 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003697}
3698
3699/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003700struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3701 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003702{
3703 struct hci_conn_params *params;
3704
Johan Hedbergc46245b2014-07-02 17:37:33 +03003705 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003706 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003707
Andre Guedes15819a72014-02-03 13:56:18 -03003708 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003709 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003710 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003711
3712 params = kzalloc(sizeof(*params), GFP_KERNEL);
3713 if (!params) {
3714 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003715 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003716 }
3717
3718 bacpy(&params->addr, addr);
3719 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003720
3721 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003722 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003723
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003724 params->conn_min_interval = hdev->le_conn_min_interval;
3725 params->conn_max_interval = hdev->le_conn_max_interval;
3726 params->conn_latency = hdev->le_conn_latency;
3727 params->supervision_timeout = hdev->le_supv_timeout;
3728 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3729
3730 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3731
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003732 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003733}
3734
/* This function requires the caller holds hdev->lock.
 *
 * Set the auto-connect policy for the given identity address,
 * creating a connection parameters entry if needed, and move the
 * entry onto the matching pending-action list. Returns 0 on success
 * or -EIO if the entry could not be created.
 */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Unhook from whichever pending list (if any) the entry is on;
	 * list_del_init keeps the node valid for re-adding below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* Not on any pending list; just refresh scanning state */
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if none is established */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
3775
Johan Hedbergf6c63242014-08-15 21:06:59 +03003776static void hci_conn_params_free(struct hci_conn_params *params)
3777{
3778 if (params->conn) {
3779 hci_conn_drop(params->conn);
3780 hci_conn_put(params->conn);
3781 }
3782
3783 list_del(&params->action);
3784 list_del(&params->list);
3785 kfree(params);
3786}
3787
Andre Guedes15819a72014-02-03 13:56:18 -03003788/* This function requires the caller holds hdev->lock */
3789void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3790{
3791 struct hci_conn_params *params;
3792
3793 params = hci_conn_params_lookup(hdev, addr, addr_type);
3794 if (!params)
3795 return;
3796
Johan Hedbergf6c63242014-08-15 21:06:59 +03003797 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003798
Johan Hedberg95305ba2014-07-04 12:37:21 +03003799 hci_update_background_scan(hdev);
3800
Andre Guedes15819a72014-02-03 13:56:18 -03003801 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3802}
3803
3804/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003805void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003806{
3807 struct hci_conn_params *params, *tmp;
3808
3809 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03003810 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3811 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003812 list_del(&params->list);
3813 kfree(params);
3814 }
3815
Johan Hedberg55af49a82014-07-02 17:37:26 +03003816 BT_DBG("All LE disabled connection parameters were removed");
3817}
3818
3819/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003820void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003821{
3822 struct hci_conn_params *params, *tmp;
3823
Johan Hedbergf6c63242014-08-15 21:06:59 +03003824 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3825 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003826
Johan Hedberga2f41a82014-07-04 12:37:19 +03003827 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003828
Andre Guedes15819a72014-02-03 13:56:18 -03003829 BT_DBG("All LE connection parameters were removed");
3830}
3831
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003832static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003833{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003834 if (status) {
3835 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003836
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003837 hci_dev_lock(hdev);
3838 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3839 hci_dev_unlock(hdev);
3840 return;
3841 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003842}
3843
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003844static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003845{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003846 /* General inquiry access code (GIAC) */
3847 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3848 struct hci_request req;
3849 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003850 int err;
3851
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003852 if (status) {
3853 BT_ERR("Failed to disable LE scanning: status %d", status);
3854 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003855 }
3856
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003857 switch (hdev->discovery.type) {
3858 case DISCOV_TYPE_LE:
3859 hci_dev_lock(hdev);
3860 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3861 hci_dev_unlock(hdev);
3862 break;
3863
3864 case DISCOV_TYPE_INTERLEAVED:
3865 hci_req_init(&req, hdev);
3866
3867 memset(&cp, 0, sizeof(cp));
3868 memcpy(&cp.lap, lap, sizeof(cp.lap));
3869 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3870 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3871
3872 hci_dev_lock(hdev);
3873
3874 hci_inquiry_cache_flush(hdev);
3875
3876 err = hci_req_run(&req, inquiry_complete);
3877 if (err) {
3878 BT_ERR("Inquiry request failed: err %d", err);
3879 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3880 }
3881
3882 hci_dev_unlock(hdev);
3883 break;
3884 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003885}
3886
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003887static void le_scan_disable_work(struct work_struct *work)
3888{
3889 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003890 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003891 struct hci_request req;
3892 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003893
3894 BT_DBG("%s", hdev->name);
3895
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003896 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003897
Andre Guedesb1efcc22014-02-26 20:21:40 -03003898 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003899
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003900 err = hci_req_run(&req, le_scan_disable_work_complete);
3901 if (err)
3902 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003903}
3904
Johan Hedberga1f4c312014-02-27 14:05:41 +02003905/* Copy the Identity Address of the controller.
3906 *
3907 * If the controller has a public BD_ADDR, then by default use that one.
3908 * If this is a LE only controller without a public address, default to
3909 * the static random address.
3910 *
3911 * For debugging purposes it is possible to force controllers with a
3912 * public address to use the static random address instead.
3913 */
3914void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3915 u8 *bdaddr_type)
3916{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003917 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003918 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3919 bacpy(bdaddr, &hdev->static_addr);
3920 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3921 } else {
3922 bacpy(bdaddr, &hdev->bdaddr);
3923 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3924 }
3925}
3926
/* Alloc HCI device.
 *
 * Allocates and initializes a zeroed hci_dev with default protocol
 * parameters, lists, work items and queues. Returns NULL on allocation
 * failure. The caller registers it with hci_register_dev() and releases
 * it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default packet types and link policy */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff mode interval defaults (slots) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning and connection parameter defaults */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Device-wide lists: management, filtering, keys and LE params */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Immediate work items for rx/tx/command processing and power on */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	/* Delayed work items for timed state transitions */
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands that never get a response */
	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
4003
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop the sysfs device reference; the actual freeing happens
	 * in the device release callback.
	 */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
4011
/* Register HCI device.
 *
 * Allocates an index, creates workqueues, sysfs/debugfs entries and
 * rfkill, adds the device to the global list and schedules power on.
 * Returns the assigned index on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A usable driver must provide open, close and send callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Per-device ordered workqueue for rx/tx/command work */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	/* Separate workqueue for request processing (e.g. power on) */
	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is not fatal; continue without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Bring the controller up asynchronously */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
4115
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * closes it, tears down sysfs/debugfs/rfkill/workqueues and frees all
 * per-device data structures, then releases the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away before tearing it down */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be gone after hci_dev_put() */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Release any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Notify mgmt unless the device never finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Free all remaining per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Make the index available for re-use */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4184
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4192
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4200
Marcel Holtmann75e05692014-11-02 08:15:38 +01004201/* Reset HCI device */
4202int hci_reset_dev(struct hci_dev *hdev)
4203{
4204 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4205 struct sk_buff *skb;
4206
4207 skb = bt_skb_alloc(3, GFP_ATOMIC);
4208 if (!skb)
4209 return -ENOMEM;
4210
4211 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4212 memcpy(skb_put(skb, 3), hw_err, 3);
4213
4214 /* Send Hardware Error to upper stack */
4215 return hci_recv_frame(hdev, skb);
4216}
4217EXPORT_SYMBOL(hci_reset_dev);
4218
Marcel Holtmann76bca882009-11-18 00:40:39 +01004219/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004220int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004221{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004222 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004223 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004224 kfree_skb(skb);
4225 return -ENXIO;
4226 }
4227
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004228 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004229 bt_cb(skb)->incoming = 1;
4230
4231 /* Time stamp */
4232 __net_timestamp(skb);
4233
Marcel Holtmann76bca882009-11-18 00:40:39 +01004234 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004235 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004236
Marcel Holtmann76bca882009-11-18 00:40:39 +01004237 return 0;
4238}
4239EXPORT_SYMBOL(hci_recv_frame);
4240
/* Reassemble a full HCI packet from driver-provided byte chunks.
 *
 * The partially built skb is kept in hdev->reassembly[index] between
 * calls. Once a complete frame is accumulated it is handed to
 * hci_recv_frame(). Returns the number of unconsumed input bytes, or a
 * negative errno (-EILSEQ on bad type/index, -ENOMEM on allocation or
 * oversized-payload failure).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Reject unknown packet types and out-of-range slots */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start a new frame: size the buffer for the worst case
		 * of this packet type and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify it fits in the buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: pass it up and reset the slot */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4348
/* Single reassembly slot used for byte-stream transports */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream into the reassembly machinery.
 *
 * For a new frame the first byte is the packet type indicator; for an
 * in-progress frame the type is taken from the stored skb. Returns the
 * number of bytes left unconsumed, or a negative errno from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: consume the type byte */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past what hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4383
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384/* ---- Interface to upper protocols ---- */
4385
Linus Torvalds1da177e2005-04-16 15:20:36 -07004386int hci_register_cb(struct hci_cb *cb)
4387{
4388 BT_DBG("%p name %s", cb, cb->name);
4389
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004390 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004392 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393
4394 return 0;
4395}
4396EXPORT_SYMBOL(hci_register_cb);
4397
4398int hci_unregister_cb(struct hci_cb *cb)
4399{
4400 BT_DBG("%p name %s", cb, cb->name);
4401
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004402 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004403 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004404 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405
4406 return 0;
4407}
4408EXPORT_SYMBOL(hci_unregister_cb);
4409
Marcel Holtmann51086992013-10-10 14:54:19 -07004410static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004412 int err;
4413
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004414 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004416 /* Time stamp */
4417 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004419 /* Send copy to monitor */
4420 hci_send_to_monitor(hdev, skb);
4421
4422 if (atomic_read(&hdev->promisc)) {
4423 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004424 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425 }
4426
4427 /* Get rid of skb owner, prior to sending to the driver. */
4428 skb_orphan(skb);
4429
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004430 err = hdev->send(hdev, skb);
4431 if (err < 0) {
4432 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4433 kfree_skb(skb);
4434 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435}
4436
Marcel Holtmann899de762014-07-11 05:51:58 +02004437bool hci_req_pending(struct hci_dev *hdev)
4438{
4439 return (hdev->req_status == HCI_REQ_PEND);
4440}
4441
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004442/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004443int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4444 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004445{
4446 struct sk_buff *skb;
4447
4448 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4449
4450 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4451 if (!skb) {
4452 BT_ERR("%s no memory for command", hdev->name);
4453 return -ENOMEM;
4454 }
4455
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004456 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004457 * single-command requests.
4458 */
4459 bt_cb(skb)->req.start = true;
4460
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004462 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463
4464 return 0;
4465}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466
4467/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004468void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469{
4470 struct hci_command_hdr *hdr;
4471
4472 if (!hdev->sent_cmd)
4473 return NULL;
4474
4475 hdr = (void *) hdev->sent_cmd->data;
4476
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004477 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478 return NULL;
4479
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004480 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481
4482 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4483}
4484
4485/* Send ACL data */
4486static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4487{
4488 struct hci_acl_hdr *hdr;
4489 int len = skb->len;
4490
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004491 skb_push(skb, HCI_ACL_HDR_SIZE);
4492 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004493 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004494 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4495 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496}
4497
/* Add ACL headers to an (optionally fragmented) skb and append every
 * fragment to the given queue. Fragments of one frame are queued
 * atomically so the transmit path never interleaves frames.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear data; fragments follow
	 * via frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* BR/EDR uses the connection handle, AMP the channel handle */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4559
4560void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4561{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004562 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004563
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004564 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004565
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004566 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004567
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004568 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570
4571/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004572void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573{
4574 struct hci_dev *hdev = conn->hdev;
4575 struct hci_sco_hdr hdr;
4576
4577 BT_DBG("%s len %d", hdev->name, skb->len);
4578
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004579 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580 hdr.dlen = skb->len;
4581
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004582 skb_push(skb, HCI_SCO_HDR_SIZE);
4583 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004584 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004585
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004586 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004587
Linus Torvalds1da177e2005-04-16 15:20:36 -07004588 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004589 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004590}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591
4592/* ---- HCI TX task (outgoing data) ---- */
4593
4594/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004595static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4596 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597{
4598 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004599 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004600 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004602 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004604
4605 rcu_read_lock();
4606
4607 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004608 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004610
4611 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4612 continue;
4613
Linus Torvalds1da177e2005-04-16 15:20:36 -07004614 num++;
4615
4616 if (c->sent < min) {
4617 min = c->sent;
4618 conn = c;
4619 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004620
4621 if (hci_conn_num(hdev, type) == num)
4622 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623 }
4624
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004625 rcu_read_unlock();
4626
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004628 int cnt, q;
4629
4630 switch (conn->type) {
4631 case ACL_LINK:
4632 cnt = hdev->acl_cnt;
4633 break;
4634 case SCO_LINK:
4635 case ESCO_LINK:
4636 cnt = hdev->sco_cnt;
4637 break;
4638 case LE_LINK:
4639 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4640 break;
4641 default:
4642 cnt = 0;
4643 BT_ERR("Unknown link type");
4644 }
4645
4646 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004647 *quote = q ? q : 1;
4648 } else
4649 *quote = 0;
4650
4651 BT_DBG("conn %p quote %d", conn, *quote);
4652 return conn;
4653}
4654
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004655static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656{
4657 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004658 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659
Ville Tervobae1f5d92011-02-10 22:38:53 -03004660 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004662 rcu_read_lock();
4663
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004665 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004666 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004667 BT_ERR("%s killing stalled connection %pMR",
4668 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004669 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670 }
4671 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004672
4673 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004674}
4675
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004676static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4677 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004678{
4679 struct hci_conn_hash *h = &hdev->conn_hash;
4680 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004681 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004682 struct hci_conn *conn;
4683 int cnt, q, conn_num = 0;
4684
4685 BT_DBG("%s", hdev->name);
4686
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004687 rcu_read_lock();
4688
4689 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004690 struct hci_chan *tmp;
4691
4692 if (conn->type != type)
4693 continue;
4694
4695 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4696 continue;
4697
4698 conn_num++;
4699
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004700 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004701 struct sk_buff *skb;
4702
4703 if (skb_queue_empty(&tmp->data_q))
4704 continue;
4705
4706 skb = skb_peek(&tmp->data_q);
4707 if (skb->priority < cur_prio)
4708 continue;
4709
4710 if (skb->priority > cur_prio) {
4711 num = 0;
4712 min = ~0;
4713 cur_prio = skb->priority;
4714 }
4715
4716 num++;
4717
4718 if (conn->sent < min) {
4719 min = conn->sent;
4720 chan = tmp;
4721 }
4722 }
4723
4724 if (hci_conn_num(hdev, type) == conn_num)
4725 break;
4726 }
4727
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004728 rcu_read_unlock();
4729
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004730 if (!chan)
4731 return NULL;
4732
4733 switch (chan->conn->type) {
4734 case ACL_LINK:
4735 cnt = hdev->acl_cnt;
4736 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004737 case AMP_LINK:
4738 cnt = hdev->block_cnt;
4739 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004740 case SCO_LINK:
4741 case ESCO_LINK:
4742 cnt = hdev->sco_cnt;
4743 break;
4744 case LE_LINK:
4745 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4746 break;
4747 default:
4748 cnt = 0;
4749 BT_ERR("Unknown link type");
4750 }
4751
4752 q = cnt / num;
4753 *quote = q ? q : 1;
4754 BT_DBG("chan %p quote %d", chan, *quote);
4755 return chan;
4756}
4757
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004758static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4759{
4760 struct hci_conn_hash *h = &hdev->conn_hash;
4761 struct hci_conn *conn;
4762 int num = 0;
4763
4764 BT_DBG("%s", hdev->name);
4765
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004766 rcu_read_lock();
4767
4768 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004769 struct hci_chan *chan;
4770
4771 if (conn->type != type)
4772 continue;
4773
4774 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4775 continue;
4776
4777 num++;
4778
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004779 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004780 struct sk_buff *skb;
4781
4782 if (chan->sent) {
4783 chan->sent = 0;
4784 continue;
4785 }
4786
4787 if (skb_queue_empty(&chan->data_q))
4788 continue;
4789
4790 skb = skb_peek(&chan->data_q);
4791 if (skb->priority >= HCI_PRIO_MAX - 1)
4792 continue;
4793
4794 skb->priority = HCI_PRIO_MAX - 1;
4795
4796 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004797 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004798 }
4799
4800 if (hci_conn_num(hdev, type) == num)
4801 break;
4802 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004803
4804 rcu_read_unlock();
4805
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004806}
4807
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004808static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4809{
4810 /* Calculate count of blocks used by this packet */
4811 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4812}
4813
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004814static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004815{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004816 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004817 /* ACL tx timeout must be longer than maximum
4818 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004819 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004820 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004821 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004822 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004823}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004824
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004825static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004826{
4827 unsigned int cnt = hdev->acl_cnt;
4828 struct hci_chan *chan;
4829 struct sk_buff *skb;
4830 int quote;
4831
4832 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004833
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004834 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004835 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004836 u32 priority = (skb_peek(&chan->data_q))->priority;
4837 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004838 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004839 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004840
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004841 /* Stop if priority has changed */
4842 if (skb->priority < priority)
4843 break;
4844
4845 skb = skb_dequeue(&chan->data_q);
4846
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004847 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004848 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004849
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004850 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004851 hdev->acl_last_tx = jiffies;
4852
4853 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004854 chan->sent++;
4855 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856 }
4857 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004858
4859 if (cnt != hdev->acl_cnt)
4860 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004861}
4862
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004863static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004864{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004865 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004866 struct hci_chan *chan;
4867 struct sk_buff *skb;
4868 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004869 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004870
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004871 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004872
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004873 BT_DBG("%s", hdev->name);
4874
4875 if (hdev->dev_type == HCI_AMP)
4876 type = AMP_LINK;
4877 else
4878 type = ACL_LINK;
4879
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004880 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004881 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004882 u32 priority = (skb_peek(&chan->data_q))->priority;
4883 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4884 int blocks;
4885
4886 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004887 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004888
4889 /* Stop if priority has changed */
4890 if (skb->priority < priority)
4891 break;
4892
4893 skb = skb_dequeue(&chan->data_q);
4894
4895 blocks = __get_blocks(hdev, skb);
4896 if (blocks > hdev->block_cnt)
4897 return;
4898
4899 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004900 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004901
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004902 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004903 hdev->acl_last_tx = jiffies;
4904
4905 hdev->block_cnt -= blocks;
4906 quote -= blocks;
4907
4908 chan->sent += blocks;
4909 chan->conn->sent += blocks;
4910 }
4911 }
4912
4913 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004914 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004915}
4916
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004917static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004918{
4919 BT_DBG("%s", hdev->name);
4920
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004921 /* No ACL link over BR/EDR controller */
4922 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4923 return;
4924
4925 /* No AMP link over AMP controller */
4926 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004927 return;
4928
4929 switch (hdev->flow_ctl_mode) {
4930 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4931 hci_sched_acl_pkt(hdev);
4932 break;
4933
4934 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4935 hci_sched_acl_blk(hdev);
4936 break;
4937 }
4938}
4939
Linus Torvalds1da177e2005-04-16 15:20:36 -07004940/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004941static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004942{
4943 struct hci_conn *conn;
4944 struct sk_buff *skb;
4945 int quote;
4946
4947 BT_DBG("%s", hdev->name);
4948
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004949 if (!hci_conn_num(hdev, SCO_LINK))
4950 return;
4951
Linus Torvalds1da177e2005-04-16 15:20:36 -07004952 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4953 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4954 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004955 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956
4957 conn->sent++;
4958 if (conn->sent == ~0)
4959 conn->sent = 0;
4960 }
4961 }
4962}
4963
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004964static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004965{
4966 struct hci_conn *conn;
4967 struct sk_buff *skb;
4968 int quote;
4969
4970 BT_DBG("%s", hdev->name);
4971
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004972 if (!hci_conn_num(hdev, ESCO_LINK))
4973 return;
4974
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004975 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4976 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004977 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4978 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004979 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004980
4981 conn->sent++;
4982 if (conn->sent == ~0)
4983 conn->sent = 0;
4984 }
4985 }
4986}
4987
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004988static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004989{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004990 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004991 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004992 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004993
4994 BT_DBG("%s", hdev->name);
4995
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004996 if (!hci_conn_num(hdev, LE_LINK))
4997 return;
4998
Marcel Holtmann4a964402014-07-02 19:10:33 +02004999 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005000 /* LE tx timeout must be longer than maximum
5001 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005002 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005003 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005004 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005005 }
5006
5007 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005008 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005009 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005010 u32 priority = (skb_peek(&chan->data_q))->priority;
5011 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005012 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005013 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005014
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005015 /* Stop if priority has changed */
5016 if (skb->priority < priority)
5017 break;
5018
5019 skb = skb_dequeue(&chan->data_q);
5020
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005021 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005022 hdev->le_last_tx = jiffies;
5023
5024 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005025 chan->sent++;
5026 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005027 }
5028 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005029
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005030 if (hdev->le_pkts)
5031 hdev->le_cnt = cnt;
5032 else
5033 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005034
5035 if (cnt != tmp)
5036 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005037}
5038
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005039static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005041 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005042 struct sk_buff *skb;
5043
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005044 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005045 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046
Marcel Holtmann52de5992013-09-03 18:08:38 -07005047 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5048 /* Schedule queues and send stuff to HCI driver */
5049 hci_sched_acl(hdev);
5050 hci_sched_sco(hdev);
5051 hci_sched_esco(hdev);
5052 hci_sched_le(hdev);
5053 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005054
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055 /* Send next queued raw (unknown type) packet */
5056 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005057 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005058}
5059
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005060/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005061
5062/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005063static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005064{
5065 struct hci_acl_hdr *hdr = (void *) skb->data;
5066 struct hci_conn *conn;
5067 __u16 handle, flags;
5068
5069 skb_pull(skb, HCI_ACL_HDR_SIZE);
5070
5071 handle = __le16_to_cpu(hdr->handle);
5072 flags = hci_flags(handle);
5073 handle = hci_handle(handle);
5074
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005075 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005076 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005077
5078 hdev->stat.acl_rx++;
5079
5080 hci_dev_lock(hdev);
5081 conn = hci_conn_hash_lookup_handle(hdev, handle);
5082 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005083
Linus Torvalds1da177e2005-04-16 15:20:36 -07005084 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005085 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005086
Linus Torvalds1da177e2005-04-16 15:20:36 -07005087 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005088 l2cap_recv_acldata(conn, skb, flags);
5089 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005091 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005092 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093 }
5094
5095 kfree_skb(skb);
5096}
5097
5098/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005099static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100{
5101 struct hci_sco_hdr *hdr = (void *) skb->data;
5102 struct hci_conn *conn;
5103 __u16 handle;
5104
5105 skb_pull(skb, HCI_SCO_HDR_SIZE);
5106
5107 handle = __le16_to_cpu(hdr->handle);
5108
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005109 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110
5111 hdev->stat.sco_rx++;
5112
5113 hci_dev_lock(hdev);
5114 conn = hci_conn_hash_lookup_handle(hdev, handle);
5115 hci_dev_unlock(hdev);
5116
5117 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005118 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005119 sco_recv_scodata(conn, skb);
5120 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005122 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005123 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124 }
5125
5126 kfree_skb(skb);
5127}
5128
Johan Hedberg9238f362013-03-05 20:37:48 +02005129static bool hci_req_is_complete(struct hci_dev *hdev)
5130{
5131 struct sk_buff *skb;
5132
5133 skb = skb_peek(&hdev->cmd_q);
5134 if (!skb)
5135 return true;
5136
5137 return bt_cb(skb)->req.start;
5138}
5139
Johan Hedberg42c6b122013-03-05 20:37:49 +02005140static void hci_resend_last(struct hci_dev *hdev)
5141{
5142 struct hci_command_hdr *sent;
5143 struct sk_buff *skb;
5144 u16 opcode;
5145
5146 if (!hdev->sent_cmd)
5147 return;
5148
5149 sent = (void *) hdev->sent_cmd->data;
5150 opcode = __le16_to_cpu(sent->opcode);
5151 if (opcode == HCI_OP_RESET)
5152 return;
5153
5154 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5155 if (!skb)
5156 return;
5157
5158 skb_queue_head(&hdev->cmd_q, skb);
5159 queue_work(hdev->workqueue, &hdev->cmd_work);
5160}
5161
Johan Hedberg9238f362013-03-05 20:37:48 +02005162void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5163{
5164 hci_req_complete_t req_complete = NULL;
5165 struct sk_buff *skb;
5166 unsigned long flags;
5167
5168 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5169
Johan Hedberg42c6b122013-03-05 20:37:49 +02005170 /* If the completed command doesn't match the last one that was
5171 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005172 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005173 if (!hci_sent_cmd_data(hdev, opcode)) {
5174 /* Some CSR based controllers generate a spontaneous
5175 * reset complete event during init and any pending
5176 * command will never be completed. In such a case we
5177 * need to resend whatever was the last sent
5178 * command.
5179 */
5180 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5181 hci_resend_last(hdev);
5182
Johan Hedberg9238f362013-03-05 20:37:48 +02005183 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005184 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005185
5186 /* If the command succeeded and there's still more commands in
5187 * this request the request is not yet complete.
5188 */
5189 if (!status && !hci_req_is_complete(hdev))
5190 return;
5191
5192 /* If this was the last command in a request the complete
5193 * callback would be found in hdev->sent_cmd instead of the
5194 * command queue (hdev->cmd_q).
5195 */
5196 if (hdev->sent_cmd) {
5197 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005198
5199 if (req_complete) {
5200 /* We must set the complete callback to NULL to
5201 * avoid calling the callback more than once if
5202 * this function gets called again.
5203 */
5204 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5205
Johan Hedberg9238f362013-03-05 20:37:48 +02005206 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005207 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005208 }
5209
5210 /* Remove all pending commands belonging to this request */
5211 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5212 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5213 if (bt_cb(skb)->req.start) {
5214 __skb_queue_head(&hdev->cmd_q, skb);
5215 break;
5216 }
5217
5218 req_complete = bt_cb(skb)->req.complete;
5219 kfree_skb(skb);
5220 }
5221 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5222
5223call_complete:
5224 if (req_complete)
5225 req_complete(hdev, status);
5226}
5227
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005228static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005230 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231 struct sk_buff *skb;
5232
5233 BT_DBG("%s", hdev->name);
5234
Linus Torvalds1da177e2005-04-16 15:20:36 -07005235 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005236 /* Send copy to monitor */
5237 hci_send_to_monitor(hdev, skb);
5238
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239 if (atomic_read(&hdev->promisc)) {
5240 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005241 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242 }
5243
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005244 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245 kfree_skb(skb);
5246 continue;
5247 }
5248
5249 if (test_bit(HCI_INIT, &hdev->flags)) {
5250 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005251 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252 case HCI_ACLDATA_PKT:
5253 case HCI_SCODATA_PKT:
5254 kfree_skb(skb);
5255 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005256 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257 }
5258
5259 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005260 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005262 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263 hci_event_packet(hdev, skb);
5264 break;
5265
5266 case HCI_ACLDATA_PKT:
5267 BT_DBG("%s ACL data packet", hdev->name);
5268 hci_acldata_packet(hdev, skb);
5269 break;
5270
5271 case HCI_SCODATA_PKT:
5272 BT_DBG("%s SCO data packet", hdev->name);
5273 hci_scodata_packet(hdev, skb);
5274 break;
5275
5276 default:
5277 kfree_skb(skb);
5278 break;
5279 }
5280 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005281}
5282
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005283static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005284{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005285 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286 struct sk_buff *skb;
5287
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005288 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5289 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005290
Linus Torvalds1da177e2005-04-16 15:20:36 -07005291 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005292 if (atomic_read(&hdev->cmd_cnt)) {
5293 skb = skb_dequeue(&hdev->cmd_q);
5294 if (!skb)
5295 return;
5296
Wei Yongjun7585b972009-02-25 18:29:52 +08005297 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005298
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005299 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005300 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005301 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005302 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005303 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005304 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005305 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005306 schedule_delayed_work(&hdev->cmd_timer,
5307 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005308 } else {
5309 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005310 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311 }
5312 }
5313}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005314
Andre Guedesa4790db2014-02-26 20:21:47 -03005315static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5316{
5317 if (status)
5318 BT_DBG("HCI request failed to update background scanning: "
5319 "status 0x%2.2x", status);
5320}
5321
5322/* This function controls the background scanning based on hdev->pend_le_conns
5323 * list. If there are pending LE connection we start the background scanning,
5324 * otherwise we stop it.
5325 *
5326 * This function requires the caller holds hdev->lock.
5327 */
5328void hci_update_background_scan(struct hci_dev *hdev)
5329{
Andre Guedesa4790db2014-02-26 20:21:47 -03005330 struct hci_request req;
5331 struct hci_conn *conn;
5332 int err;
5333
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005334 if (!test_bit(HCI_UP, &hdev->flags) ||
5335 test_bit(HCI_INIT, &hdev->flags) ||
5336 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02005337 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005338 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005339 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005340 return;
5341
Johan Hedberga70f4b52014-07-07 15:19:50 +03005342 /* No point in doing scanning if LE support hasn't been enabled */
5343 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5344 return;
5345
Johan Hedbergae23ada2014-07-07 13:24:59 +03005346 /* If discovery is active don't interfere with it */
5347 if (hdev->discovery.state != DISCOVERY_STOPPED)
5348 return;
5349
Marcel Holtmannee3c3ca2014-12-05 11:45:23 +01005350 /* Reset RSSI and UUID filters when starting background scanning
5351 * since these filters are meant for service discovery only.
5352 *
5353 * The Start Discovery and Start Service Discovery operations
5354 * ensure to set proper values for RSSI threshold and UUID
5355 * filter list. So it is safe to just reset them here.
5356 */
5357 hci_discovery_filter_clear(hdev);
5358
Andre Guedesa4790db2014-02-26 20:21:47 -03005359 hci_req_init(&req, hdev);
5360
Johan Hedbergd1d588c2014-07-20 17:10:45 +03005361 if (list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005362 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005363 /* If there is no pending LE connections or devices
5364 * to be scanned for, we should stop the background
5365 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005366 */
5367
5368 /* If controller is not scanning we are done. */
5369 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5370 return;
5371
5372 hci_req_add_le_scan_disable(&req);
5373
5374 BT_DBG("%s stopping background scanning", hdev->name);
5375 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005376 /* If there is at least one pending LE connection, we should
5377 * keep the background scan running.
5378 */
5379
Andre Guedesa4790db2014-02-26 20:21:47 -03005380 /* If controller is connecting, we should not start scanning
5381 * since some controllers are not able to scan and connect at
5382 * the same time.
5383 */
5384 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5385 if (conn)
5386 return;
5387
Andre Guedes4340a122014-03-10 18:26:24 -03005388 /* If controller is currently scanning, we stop it to ensure we
5389 * don't miss any advertising (due to duplicates filter).
5390 */
5391 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5392 hci_req_add_le_scan_disable(&req);
5393
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005394 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005395
5396 BT_DBG("%s starting background scanning", hdev->name);
5397 }
5398
5399 err = hci_req_run(&req, update_background_scan_complete);
5400 if (err)
5401 BT_ERR("Failed to run HCI request: err %d", err);
5402}
Johan Hedberg432df052014-08-01 11:13:31 +03005403
Johan Hedberg22f433d2014-08-01 11:13:32 +03005404static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5405{
5406 struct bdaddr_list *b;
5407
5408 list_for_each_entry(b, &hdev->whitelist, list) {
5409 struct hci_conn *conn;
5410
5411 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5412 if (!conn)
5413 return true;
5414
5415 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5416 return true;
5417 }
5418
5419 return false;
5420}
5421
Johan Hedberg1d2dc5b2014-12-19 13:40:19 +02005422void __hci_update_page_scan(struct hci_request *req)
Johan Hedberg432df052014-08-01 11:13:31 +03005423{
Johan Hedberg1d2dc5b2014-12-19 13:40:19 +02005424 struct hci_dev *hdev = req->hdev;
Johan Hedberg432df052014-08-01 11:13:31 +03005425 u8 scan;
5426
5427 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5428 return;
5429
5430 if (!hdev_is_powered(hdev))
5431 return;
5432
5433 if (mgmt_powering_down(hdev))
5434 return;
5435
5436 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
Johan Hedberg22f433d2014-08-01 11:13:32 +03005437 disconnected_whitelist_entries(hdev))
Johan Hedberg432df052014-08-01 11:13:31 +03005438 scan = SCAN_PAGE;
5439 else
5440 scan = SCAN_DISABLED;
5441
5442 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5443 return;
5444
5445 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5446 scan |= SCAN_INQUIRY;
5447
Johan Hedberg1d2dc5b2014-12-19 13:40:19 +02005448 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5449}
5450
5451void hci_update_page_scan(struct hci_dev *hdev)
5452{
5453 struct hci_request req;
5454
5455 hci_req_init(&req, hdev);
5456 __hci_update_page_scan(&req);
5457 hci_req_run(&req, NULL);
Johan Hedberg432df052014-08-01 11:13:31 +03005458}