/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

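/* The read-only entries below follow the standard seq_file pattern:
 * each _show() callback prints one snapshot of controller state under
 * hci_dev_lock(), each _open() wires the callback up via single_open()
 * and the file_operations reuse seq_read/seq_lseek/single_release.
 */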
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open = features_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open = blacklist_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u32 data0, data5;
                u16 data1, data2, data3, data4;

                data5 = get_unaligned_le32(uuid);
                data4 = get_unaligned_le16(uuid + 4);
                data3 = get_unaligned_le16(uuid + 6);
                data2 = get_unaligned_le16(uuid + 8);
                data1 = get_unaligned_le16(uuid + 10);
                data0 = get_unaligned_le32(uuid + 12);

                seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
                           data0, data1, data2, data3, data4, data5);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open = uuids_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open = inquiry_cache_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open = link_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

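/* Boolean flags are exposed with a raw read() handler rather than
 * seq_file: the bit is rendered as "Y\n" or "N\n" into a small buffer
 * and copied to userspace with simple_read_from_buffer().
 */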
static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
        .open = simple_open,
        .read = use_debug_keys_read,
        .llseek = default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open = dev_class_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

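/* The numeric attributes below are built with DEFINE_SIMPLE_ATTRIBUTE,
 * which turns a get() callback, an optional set() callback and a printf
 * format into a complete file_operations. Passing NULL for set() makes
 * the attribute read-only.
 */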
static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

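/* Unlike the purely host-side attributes above, SSP debug mode lives in
 * the controller: the setter sends the Write SSP Debug Mode command
 * synchronously and only caches the new value once the command status
 * indicates success.
 */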
static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open = static_address_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int own_address_type_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && val != 1)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->own_addr_type = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->own_addr_type;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
                        own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           8, ltk->rand, 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open = long_term_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

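/* Fetch the event skb left behind by the last completed request and
 * verify that it is what the caller asked for: either a specific event
 * code, or a Command Complete event carrying the expected opcode.
 */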
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

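/* Send a single HCI command and sleep until the controller answers or
 * the timeout expires. Callers serialize via hci_req_lock(), as
 * ssp_debug_mode_set() above does, and get back the resulting event
 * skb (or an ERR_PTR). A driver setup routine could, for example,
 * issue a vendor-specific command (the opcode here is purely
 * illustrative) like this:
 *
 *        skb = __hci_cmd_sync(hdev, 0xfc0f, sizeof(param), &param,
 *                             HCI_INIT_TIMEOUT);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 *        kfree_skb(skb);
 */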
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

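/* Pick the best supported inquiry mode: 0x02 = inquiry result with
 * extended data, 0x01 = inquiry result with RSSI, 0x00 = standard.
 * A few controllers matched below by manufacturer, HCI revision and
 * LMP subversion are known to handle RSSI mode even though they do
 * not advertise the feature bit for it.
 */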
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                /* If the controller has a public BD_ADDR, then by
                 * default use that one. If this is a LE only
                 * controller without one, default to the random
                 * address.
                 */
                if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                        hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
                else
                        hdev->own_addr_type = ADDR_LE_DEV_RANDOM;

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

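/* Controller initialization runs as up to four synchronous request
 * stages: init1 resets the controller and reads basic capabilities,
 * init2 and init3 configure BR/EDR and LE based on what the earlier
 * stages reported, and init4 handles late optional features. AMP
 * controllers stop after the first stage.
 */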
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
                                    hdev, &use_debug_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
                                    hdev, &ssp_debug_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);
                debugfs_create_file("own_address_type", 0644, hdev->debugfs,
                                    hdev, &own_address_type_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
        }

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

1403/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001404
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001405bool hci_discovery_active(struct hci_dev *hdev)
1406{
1407 struct discovery_state *discov = &hdev->discovery;
1408
Andre Guedes6fbe1952012-02-03 17:47:58 -03001409 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001410 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001411 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001412 return true;
1413
Andre Guedes6fbe1952012-02-03 17:47:58 -03001414 default:
1415 return false;
1416 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001417}
1418
Johan Hedbergff9ef572012-01-04 14:23:45 +02001419void hci_discovery_set_state(struct hci_dev *hdev, int state)
1420{
1421 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1422
1423 if (hdev->discovery.state == state)
1424 return;
1425
1426 switch (state) {
1427 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001428 if (hdev->discovery.state != DISCOVERY_STARTING)
1429 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001430 break;
1431 case DISCOVERY_STARTING:
1432 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001433 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001434 mgmt_discovering(hdev, 1);
1435 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001436 case DISCOVERY_RESOLVING:
1437 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001438 case DISCOVERY_STOPPING:
1439 break;
1440 }
1441
1442 hdev->discovery.state = state;
1443}
1444
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001445void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446{
Johan Hedberg30883512012-01-04 14:16:21 +02001447 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001448 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
Johan Hedberg561aafb2012-01-04 13:31:59 +02001450 list_for_each_entry_safe(p, n, &cache->all, all) {
1451 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001452 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001454
1455 INIT_LIST_HEAD(&cache->unknown);
1456 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457}
1458
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001459struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1460 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461{
Johan Hedberg30883512012-01-04 14:16:21 +02001462 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 struct inquiry_entry *e;
1464
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001465 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466
Johan Hedberg561aafb2012-01-04 13:31:59 +02001467 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001469 return e;
1470 }
1471
1472 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473}
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
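
/* Find an entry on the resolve list: BDADDR_ANY matches the first
 * entry in the requested name_state, any other address must match
 * exactly.
 */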
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
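
/* Re-insert an entry at its proper position on the resolve list,
 * which is kept ordered by signal strength (smallest abs(rssi)
 * first) so the strongest devices get their names resolved first;
 * entries already in NAME_PENDING keep their place.
 */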
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
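
/* Add a fresh inquiry result to the cache or refresh an existing
 * entry. Returns true if the result can be reported with a known
 * remote name, false if name resolution is still outstanding. When
 * ssp is non-NULL it is updated with the device's SSP mode.
 */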
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
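
/* Copy at most num cache entries into buf as struct inquiry_info
 * records and return the number of records written.
 */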
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
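
/* Handle a user-space inquiry request: re-run the inquiry if the
 * cache is stale (or a flush was requested) and copy the cached
 * results back to the caller.
 */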
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR
		 * via the done path so the device reference is dropped.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* inquiry_cache_dump() can't sleep. Therefore we allocate a
	 * temporary buffer and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else {
		err = -EFAULT;
	}

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
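
/* Bring a controller up: invoke the driver's open callback, run
 * the HCI init sequence where appropriate and notify the
 * management interface. The rfkill and address checks are
 * deliberately skipped while the device is still in SETUP.
 */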
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
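
/* Bring a controller down: flush pending work, clear queues and
 * timers, optionally send HCI Reset (quirk dependent) and invoke
 * the driver's close callback. Calling this on a device that is
 * already down simply returns 0.
 */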
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
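
/* Handle the HCISET* ioctls that tune controller parameters such
 * as authentication, encryption, scan mode and packet types. Only
 * BR/EDR capable and enabled controllers that are not claimed by a
 * user channel are accepted.
 */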
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
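
/* Deferred power-on handler. Error conditions that were ignored
 * while the device was in SETUP (rfkill, missing public/static
 * address) are re-checked here and, if still present, the device
 * is powered back off.
 */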
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
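
/* Decide whether a newly generated link key should be stored
 * persistently, based on its type and on the bonding requirements
 * that were negotiated for the connection.
 */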
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}
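
/* Look up a long term key by EDiv and Rand, the pair that the
 * controller reports in an LE Long Term Key Request event.
 */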
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
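
/* Store or update a BR/EDR link key, working around controllers
 * that mislabel changed combination keys, and report newly
 * generated keys to the management interface.
 */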
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
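
/* Store or update an SMP key (STK or LTK) for the given address
 * and address type; only actual LTKs are reported to the
 * management interface.
 */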
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
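
/* Remember the hash and randomizer received for a remote device
 * over an out-of-band channel, replacing any previous values.
 */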
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
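
/* Add a device to the blacklist. BDADDR_ANY is rejected and
 * duplicate entries return -EEXIST.
 */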
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
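
/* Completion handler for the inquiry that interleaved discovery
 * issues after LE scanning stops; a failure resets the discovery
 * state machine to STOPPED.
 */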
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}
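
/* Runs when the LE scan disable command completes: finish an
 * LE-only discovery, or chain into a classic inquiry with the
 * general inquiry access code for interleaved discovery.
 */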
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
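
/* Delayed work that stops an ongoing LE scan by sending LE Set
 * Scan Enable (disabled); the follow-up logic lives in
 * le_scan_disable_work_complete().
 */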
2757
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002758static void le_scan_disable_work(struct work_struct *work)
2759{
2760 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002761 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002762 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002763 struct hci_request req;
2764 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002765
2766 BT_DBG("%s", hdev->name);
2767
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002768 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002769
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002770 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002771 cp.enable = LE_SCAN_DISABLE;
2772 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002773
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002774 err = hci_req_run(&req, le_scan_disable_work_complete);
2775 if (err)
2776 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002777}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
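
/* Example (illustrative sketch, not part of the original file): the
 * typical life cycle a transport driver goes through with the API
 * above -- allocate, fill in the mandatory open/close/send callbacks,
 * register, and later unregister/free. All "example_" names are
 * hypothetical.
 */
#if 0
static int example_open(struct hci_dev *hdev)
{
	/* Bring up the transport (USB/UART/SDIO) here. */
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	/* Shut the transport down and drop pending frames. */
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Hand the frame to the hardware; consume the skb when done. */
	kfree_skb(skb);
	return 0;
}

static int example_setup(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->send  = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
#endif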

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
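
/* Example (illustrative sketch, not part of the original file): how a
 * driver's interrupt/URB completion path would feed one complete event
 * frame into the core. The buffer and length are hypothetical; the
 * driver must set the packet type before calling hci_recv_frame().
 */
#if 0
static int example_rx_complete(struct hci_dev *hdev, const void *buf,
			       size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	/* Queues the frame on hdev->rx_q and kicks hci_rx_work(). */
	return hci_recv_frame(hdev, skb);
}
#endif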

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
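
/* Example (illustrative sketch, not part of the original file): a UART
 * style driver feeding raw bytes of a known packet type to the core in
 * arbitrary chunk sizes; hci_recv_fragment() reassembles full frames
 * and forwards them via hci_recv_frame(). Buffer and length are
 * hypothetical.
 */
#if 0
static void example_feed_event_bytes(struct hci_dev *hdev,
				     const u8 *buf, int len)
{
	/* May be called repeatedly with partial data; leftover bytes
	 * stay buffered in hdev->reassembly[] between calls.
	 */
	if (hci_recv_fragment(hdev, HCI_EVENT_PKT, (void *)buf, len) < 0)
		BT_ERR("%s reassembly failed", hdev->name);
}
#endif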

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
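
/* Example (illustrative sketch, not part of the original file): issuing
 * a single stand-alone command. HCI_Reset takes no parameters, so plen
 * is 0 and param is NULL; the reply is delivered asynchronously through
 * the event path.
 */
#if 0
static int example_reset_controller(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
#endif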

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
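
/* Example (illustrative sketch, not part of the original file): batching
 * two commands into one asynchronous request, mirroring the pattern used
 * by le_scan_disable_work() above. The "example_" names are
 * hypothetical; the completion callback runs once the last command in
 * the request completes.
 */
#if 0
static void example_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	/* Splices the queued commands onto hdev->cmd_q and kicks
	 * hci_cmd_work(); returns 0 or a negative error.
	 */
	return hci_req_run(&req, example_complete);
}
#endif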

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
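
/* Worked example (added for illustration): the quota is the available
 * controller buffer count divided evenly among busy connections. With
 * hdev->acl_cnt = 8 and num = 3 ACL connections holding queued data,
 * each scheduling round may send q = 8 / 3 = 2 packets per connection;
 * a result of 0 is rounded up to 1 so progress is always possible.
 */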

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
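
/* Worked example (added for illustration): with hdev->block_len = 339
 * (a value a block-based AMP controller might report), an ACL frame of
 * skb->len = 1025 carries 1021 payload bytes after the 4-byte ACL
 * header, so it occupies DIV_ROUND_UP(1021, 339) = 4 data blocks.
 */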

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}