/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI debugfs entries ---- */

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
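/* All read-only debugfs entries in this file follow the seq_file pattern
 * shown above: single_open() binds the *_show() callback, and seq_read(),
 * seq_lseek() and single_release() provide the rest of the file semantics.
 * Assuming debugfs is mounted at the usual /sys/kernel/debug, the entries
 * created in __hci_init() below can simply be read from userspace, e.g.:
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/features
 */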
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		data5 = get_unaligned_le32(uuid->uuid);
		data4 = get_unaligned_le16(uuid->uuid + 4);
		data3 = get_unaligned_le16(uuid->uuid + 6);
		data2 = get_unaligned_le16(uuid->uuid + 8);
		data1 = get_unaligned_le16(uuid->uuid + 10);
		data0 = get_unaligned_le32(uuid->uuid + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
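/* The 128-bit UUID in struct bt_uuid is stored least-significant byte
 * first, so uuids_show() above walks the buffer from the end backwards:
 * data5 comes from bytes 0-3 but is printed last, while data0 comes from
 * bytes 12-15 and forms the leading group of the canonical
 * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx string. Note the reads go through
 * the uuid byte array itself, not the struct (whose first member is the
 * list head).
 */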
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
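/* DEFINE_SIMPLE_ATTRIBUTE() generates the open/read/write file operations
 * for a debugfs attribute from a get/set pair and a printf-style format.
 * Passing NULL for the setter, as done for voice_setting above, makes the
 * entry effectively read-only.
 */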
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
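/* An illustrative (not normative) userspace view of the writable
 * attributes above, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	# idle timeout is in milliseconds; 0 disables it, otherwise
 *	# the value must fall within 500..3600000
 *	echo 2000 > /sys/kernel/debug/bluetooth/hci0/idle_timeout
 *
 *	# sniff intervals must be non-zero, even and keep min <= max,
 *	# otherwise the setters return -EINVAL
 *	echo 80 > /sys/kernel/debug/bluetooth/hci0/sniff_min_interval
 */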
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
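/* The two values accepted by own_address_type_set() correspond to the LE
 * address type constants assigned in hci_init3_req() below:
 * ADDR_LE_DEV_PUBLIC (0x00) and ADDR_LE_DEV_RANDOM (0x01).
 */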
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
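/* Illustrative use of the synchronous command helpers (this snippet is
 * not part of this file): a driver setup routine, which runs with the
 * request lock already held, could read the local version like this:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 *
 * __hci_cmd_sync_ev() additionally lets the caller wait for a specific
 * event code instead of the default Command Complete.
 */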
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
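/* Typical usage: a request builder plus a timeout, for example
 *
 *	err = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 *
 * where hci_reset_req() queues HCI_OP_RESET and __hci_req_sync() waits
 * for hci_req_sync_complete() to wake it up.
 */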
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
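/* The return values above map directly onto the HCI Write Inquiry Mode
 * parameter: 0x00 = standard inquiry results, 0x01 = results with RSSI,
 * 0x02 = results with RSSI or extended inquiry results. The
 * manufacturer/revision special cases appear to cover controllers that
 * deliver RSSI results without advertising the matching LMP feature bit.
 */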
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by
		 * default use that one. If this is a LE only
		 * controller without one, default to the random
		 * address.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
		else
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
	}

	return 0;
}
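/* Rough shape of the staged bring-up driven by __hci_init(): stage 1
 * resets the controller and reads its basic identity, stage 2 sets up
 * BR/EDR and/or LE basics plus the event mask, stage 3 covers link
 * policy, LE addressing and extended feature pages, and stage 4 handles
 * the page-2 event mask and Synchronization Train support. The debugfs
 * entries are created only while HCI_SETUP is set, i.e. during initial
 * controller setup rather than on every power-on.
 */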
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
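/* hci_dev_get() returns the device with a reference held; every
 * successful call must be balanced with hci_dev_put() when the caller
 * is done with the device.
 */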
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

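/* Copy up to @num cached entries into @buf as an array of struct
 * inquiry_info and return how many were copied.
 */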
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

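/* Handler for the HCIINQUIRY ioctl: run a fresh inquiry if the cache
 * is stale or IREQ_CACHE_FLUSH was requested, then copy the cached
 * results back to user space.
 */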
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

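/* Bring the controller up: run the driver's open callback, the
 * optional vendor setup stage and, unless the device is marked raw or
 * bound to a user channel, the HCI init sequence. Serialized against
 * other requests through hci_req_lock().
 */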
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        err = hci_dev_do_open(hdev);

        hci_dev_put(hdev);

        return err;
}

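/* Common power-down path, shared by the HCIDEVDOWN ioctl, rfkill, the
 * delayed power_off work and device unregistration.
 */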
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
                clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled.
         */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                if (hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 0);
                        hci_dev_unlock(hdev);
                }
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = AMP_STATUS_POWERED_DOWN;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags)) {
                ret = -ENETDOWN;
                goto done;
        }

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                  (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr = hdev->bdaddr;
        di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                return -EBUSY;

        if (blocked) {
                set_bit(HCI_RFKILLED, &hdev->dev_flags);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                        hci_dev_do_close(hdev);
        } else {
                clear_bit(HCI_RFKILLED, &hdev->dev_flags);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

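/* Work item queued from hci_register_dev() (and by mgmt when powering
 * a controller on) to perform the actual power-on.
 */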
static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
            (hdev->dev_type == HCI_BREDR &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
                hci_dev_do_close(hdev);
        } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        mgmt_discoverable_timeout(hdev);
}

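/* Helpers for dropping the per-controller lists of stored UUIDs and
 * keys, used when clearing state or unregistering a controller.
 */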
int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

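/* Decide whether a link key should be stored persistently or dropped
 * once the connection goes down, based on the key type and on both
 * sides' authentication requirements.
 */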
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither the local nor the remote side requested no-bonding */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently.
         */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

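/* Store a new BR/EDR link key or update an existing one. When new_key
 * is set, user space is notified through mgmt_new_link_key() so the
 * key can be saved persistently.
 */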
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key.
         */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
                __le16 ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
        } else {
                BT_ERR("%s command tx timeout", hdev->name);
        }

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                            u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
                                         bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list) {
                if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
                        return b;
        }

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (!bacmp(bdaddr, BDADDR_ANY))
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr, type))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);
        entry->bdaddr_type = type;

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (!bacmp(bdaddr, BDADDR_ANY))
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr, type);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
        if (status) {
                BT_ERR("Failed to start inquiry: status %d", status);

                hci_dev_lock(hdev);
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                hci_dev_unlock(hdev);
                return;
        }
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_request req;
        struct hci_cp_inquiry cp;
        int err;

        if (status) {
                BT_ERR("Failed to disable LE scanning: status %d", status);
                return;
        }

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
                hci_dev_lock(hdev);
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                hci_dev_unlock(hdev);
                break;

        case DISCOV_TYPE_INTERLEAVED:
                hci_req_init(&req, hdev);

                memset(&cp, 0, sizeof(cp));
                memcpy(&cp.lap, lap, sizeof(cp.lap));
                cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
                hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

                hci_dev_lock(hdev);

                hci_inquiry_cache_flush(hdev);

                err = hci_req_run(&req, inquiry_complete);
                if (err) {
                        BT_ERR("Inquiry request failed: err %d", err);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                }

                hci_dev_unlock(hdev);
                break;
        }
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_cp_le_set_scan_enable cp;
        struct hci_request req;
        int err;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

        err = hci_req_run(&req, le_scan_disable_work_complete);
        if (err)
                BT_ERR("Disable LE scanning request failed: err %d", err);
}

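/* A minimal sketch of how a driver is expected to use the allocation
 * and registration API below; my_open/my_close/my_send are
 * hypothetical driver callbacks, not part of this file:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */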
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->num_iac = 0x01;		/* One IAC support is mandatory */
        hdev->io_capability = 0x03;	/* No Input No Output */
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;

        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        hdev->le_scan_interval = 0x0060;
        hdev->le_scan_window = 0x0030;

        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);

        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->blacklist);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
        INIT_LIST_HEAD(&hdev->conn_hash.list);

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);

        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

        hci_init_sysfs(hdev);
        discovery_init(hdev);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

Linus Torvalds1da177e2005-04-16 15:20:36 -07002733/* Register HCI device */
2734int hci_register_dev(struct hci_dev *hdev)
2735{
David Herrmannb1b813d2012-04-22 14:39:58 +02002736 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737
David Herrmann010666a2012-01-07 15:47:07 +01002738 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 return -EINVAL;
2740
Mat Martineau08add512011-11-02 16:18:36 -07002741 /* Do not allow HCI_AMP devices to register at index 0,
2742 * so the index can be used as the AMP controller ID.
2743 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002744 switch (hdev->dev_type) {
2745 case HCI_BREDR:
2746 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2747 break;
2748 case HCI_AMP:
2749 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2750 break;
2751 default:
2752 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002754
Sasha Levin3df92b32012-05-27 22:36:56 +02002755 if (id < 0)
2756 return id;
2757
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 sprintf(hdev->name, "hci%d", id);
2759 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002760
2761 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2762
Kees Cookd8537542013-07-03 15:04:57 -07002763 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2764 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002765 if (!hdev->workqueue) {
2766 error = -ENOMEM;
2767 goto err;
2768 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002769
Kees Cookd8537542013-07-03 15:04:57 -07002770 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2771 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002772 if (!hdev->req_workqueue) {
2773 destroy_workqueue(hdev->workqueue);
2774 error = -ENOMEM;
2775 goto err;
2776 }
2777
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002778 if (!IS_ERR_OR_NULL(bt_debugfs))
2779 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2780
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002781 dev_set_name(&hdev->dev, "%s", hdev->name);
2782
2783 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02002784 if (error < 0)
2785 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002787 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002788 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2789 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002790 if (hdev->rfkill) {
2791 if (rfkill_register(hdev->rfkill) < 0) {
2792 rfkill_destroy(hdev->rfkill);
2793 hdev->rfkill = NULL;
2794 }
2795 }
2796
Johan Hedberg5e130362013-09-13 08:58:17 +03002797 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2798 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2799
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002800 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002801 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002802
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002803 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002804 /* Assume BR/EDR support until proven otherwise (such as
2805 * through reading supported features during init.
2806 */
2807 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2808 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002809
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002810 write_lock(&hci_dev_list_lock);
2811 list_add(&hdev->list, &hci_dev_list);
2812 write_unlock(&hci_dev_list_lock);
2813
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002815 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816
Johan Hedberg19202572013-01-14 22:33:51 +02002817 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002818
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002820
David Herrmann33ca9542011-10-08 14:58:49 +02002821err_wqueue:
2822 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002823 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002824err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002825 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002826
David Herrmann33ca9542011-10-08 14:58:49 +02002827 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828}
2829EXPORT_SYMBOL(hci_register_dev);
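/* A minimal, hypothetical transport-driver sketch of the registration
 * flow above: allocate the device, wire up the mandatory open/close/send
 * callbacks, then register. The example_* names (and HCI_USB as the bus)
 * are illustrative assumptions, not part of this file.
 */
#if 0 /* illustrative sketch, not compiled */
static int example_open(struct hci_dev *hdev)
{
	/* Bring up the underlying transport (USB, UART, SDIO, ...) */
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	/* Quiesce the transport and drop any in-flight frames */
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Hand the frame to the hardware; its type is in bt_cb(skb)->pkt_type */
	kfree_skb(skb);
	return 0;
}

static int example_setup(struct device *parent)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;
	SET_HCIDEV_DEV(hdev, parent);

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}
#endif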
2830
2831/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002832void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833{
Sasha Levin3df92b32012-05-27 22:36:56 +02002834 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002835
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002836 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837
Johan Hovold94324962012-03-15 14:48:41 +01002838 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2839
Sasha Levin3df92b32012-05-27 22:36:56 +02002840 id = hdev->id;
2841
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002842 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002844 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845
2846 hci_dev_do_close(hdev);
2847
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302848 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002849 kfree_skb(hdev->reassembly[i]);
2850
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002851 cancel_work_sync(&hdev->power_on);
2852
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002853 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002854 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002855 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002856 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002857 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002858 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002859
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002860 /* mgmt_index_removed should take care of emptying the
2861 * pending list */
2862 BUG_ON(!list_empty(&hdev->mgmt_pending));
2863
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 hci_notify(hdev, HCI_DEV_UNREG);
2865
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002866 if (hdev->rfkill) {
2867 rfkill_unregister(hdev->rfkill);
2868 rfkill_destroy(hdev->rfkill);
2869 }
2870
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002871 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08002872
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002873 debugfs_remove_recursive(hdev->debugfs);
2874
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002875 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002876 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002877
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002878 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002879 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002880 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002881 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002882 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002883 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002884 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002885
David Herrmanndc946bd2012-01-07 15:47:24 +01002886 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002887
2888 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889}
2890EXPORT_SYMBOL(hci_unregister_dev);
2891
2892/* Suspend HCI device */
2893int hci_suspend_dev(struct hci_dev *hdev)
2894{
2895 hci_notify(hdev, HCI_DEV_SUSPEND);
2896 return 0;
2897}
2898EXPORT_SYMBOL(hci_suspend_dev);
2899
2900/* Resume HCI device */
2901int hci_resume_dev(struct hci_dev *hdev)
2902{
2903 hci_notify(hdev, HCI_DEV_RESUME);
2904 return 0;
2905}
2906EXPORT_SYMBOL(hci_resume_dev);
2907
Marcel Holtmann76bca882009-11-18 00:40:39 +01002908/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002909int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002910{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002911	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002912		      !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002913 kfree_skb(skb);
2914 return -ENXIO;
2915 }
2916
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002917 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002918 bt_cb(skb)->incoming = 1;
2919
2920 /* Time stamp */
2921 __net_timestamp(skb);
2922
Marcel Holtmann76bca882009-11-18 00:40:39 +01002923 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002924 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002925
Marcel Holtmann76bca882009-11-18 00:40:39 +01002926 return 0;
2927}
2928EXPORT_SYMBOL(hci_recv_frame);
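/* A minimal sketch, assuming the transport delivers complete frames and
 * reports the packet type out of band: copy the payload into an skb, tag
 * its type and hand it to hci_recv_frame(). The example_* name is an
 * illustrative assumption.
 */
#if 0 /* illustrative sketch, not compiled */
static int example_deliver(struct hci_dev *hdev, u8 pkt_type,
			   const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = pkt_type;
	memcpy(skb_put(skb, len), buf, len);

	/* hci_recv_frame() consumes the skb, even on error */
	return hci_recv_frame(hdev, skb);
}
#endif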
2929
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302930static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002931 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302932{
2933 int len = 0;
2934 int hlen = 0;
2935 int remain = count;
2936 struct sk_buff *skb;
2937 struct bt_skb_cb *scb;
2938
2939 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002940 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302941 return -EILSEQ;
2942
2943 skb = hdev->reassembly[index];
2944
2945 if (!skb) {
2946 switch (type) {
2947 case HCI_ACLDATA_PKT:
2948 len = HCI_MAX_FRAME_SIZE;
2949 hlen = HCI_ACL_HDR_SIZE;
2950 break;
2951 case HCI_EVENT_PKT:
2952 len = HCI_MAX_EVENT_SIZE;
2953 hlen = HCI_EVENT_HDR_SIZE;
2954 break;
2955 case HCI_SCODATA_PKT:
2956 len = HCI_MAX_SCO_SIZE;
2957 hlen = HCI_SCO_HDR_SIZE;
2958 break;
2959 }
2960
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002961 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302962 if (!skb)
2963 return -ENOMEM;
2964
2965 scb = (void *) skb->cb;
2966 scb->expect = hlen;
2967 scb->pkt_type = type;
2968
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302969 hdev->reassembly[index] = skb;
2970 }
2971
2972 while (count) {
2973 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002974 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302975
2976 memcpy(skb_put(skb, len), data, len);
2977
2978 count -= len;
2979 data += len;
2980 scb->expect -= len;
2981 remain = count;
2982
2983 switch (type) {
2984 case HCI_EVENT_PKT:
2985 if (skb->len == HCI_EVENT_HDR_SIZE) {
2986 struct hci_event_hdr *h = hci_event_hdr(skb);
2987 scb->expect = h->plen;
2988
2989 if (skb_tailroom(skb) < scb->expect) {
2990 kfree_skb(skb);
2991 hdev->reassembly[index] = NULL;
2992 return -ENOMEM;
2993 }
2994 }
2995 break;
2996
2997 case HCI_ACLDATA_PKT:
2998 if (skb->len == HCI_ACL_HDR_SIZE) {
2999 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3000 scb->expect = __le16_to_cpu(h->dlen);
3001
3002 if (skb_tailroom(skb) < scb->expect) {
3003 kfree_skb(skb);
3004 hdev->reassembly[index] = NULL;
3005 return -ENOMEM;
3006 }
3007 }
3008 break;
3009
3010 case HCI_SCODATA_PKT:
3011 if (skb->len == HCI_SCO_HDR_SIZE) {
3012 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3013 scb->expect = h->dlen;
3014
3015 if (skb_tailroom(skb) < scb->expect) {
3016 kfree_skb(skb);
3017 hdev->reassembly[index] = NULL;
3018 return -ENOMEM;
3019 }
3020 }
3021 break;
3022 }
3023
3024 if (scb->expect == 0) {
3025 /* Complete frame */
3026
3027 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003028 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303029
3030 hdev->reassembly[index] = NULL;
3031 return remain;
3032 }
3033 }
3034
3035 return remain;
3036}
3037
Marcel Holtmannef222012007-07-11 06:42:04 +02003038int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3039{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303040 int rem = 0;
3041
Marcel Holtmannef222012007-07-11 06:42:04 +02003042 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3043 return -EILSEQ;
3044
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003045 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003046 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303047 if (rem < 0)
3048 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003049
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303050 data += (count - rem);
3051 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003052 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003053
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303054 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003055}
3056EXPORT_SYMBOL(hci_recv_fragment);
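/* A sketch of how a driver that receives typed HCI packets in arbitrary
 * chunks might use hci_recv_fragment(); the reassembly buffers above
 * rebuild the complete frame and push it through hci_recv_frame().
 * The example_* name is an assumption for illustration.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_rx_chunk(struct hci_dev *hdev, int pkt_type,
			     void *data, int count)
{
	int err = hci_recv_fragment(hdev, pkt_type, data, count);

	if (err < 0)
		BT_ERR("%s reassembly failed (%d)", hdev->name, err);
}
#endif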
3057
Suraj Sumangala99811512010-07-14 13:02:19 +05303058#define STREAM_REASSEMBLY 0
3059
3060int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3061{
3062 int type;
3063 int rem = 0;
3064
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003065 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303066 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3067
3068 if (!skb) {
3069 struct { char type; } *pkt;
3070
3071 /* Start of the frame */
3072 pkt = data;
3073 type = pkt->type;
3074
3075 data++;
3076 count--;
3077 } else
3078 type = bt_cb(skb)->pkt_type;
3079
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003080 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003081 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303082 if (rem < 0)
3083 return rem;
3084
3085 data += (count - rem);
3086 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003087 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303088
3089 return rem;
3090}
3091EXPORT_SYMBOL(hci_recv_stream_fragment);
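/* A sketch for UART-style drivers that only see a raw byte stream (a
 * packet type byte followed by the packet itself): each received buffer
 * can be fed to hci_recv_stream_fragment(), which recovers the framing
 * via the STREAM_REASSEMBLY slot. The example_* name is illustrative.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_uart_rx(struct hci_dev *hdev, void *data, int count)
{
	if (hci_recv_stream_fragment(hdev, data, count) < 0)
		BT_ERR("%s corrupted byte stream", hdev->name);
}
#endif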
3092
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093/* ---- Interface to upper protocols ---- */
3094
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095int hci_register_cb(struct hci_cb *cb)
3096{
3097 BT_DBG("%p name %s", cb, cb->name);
3098
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003099 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003101 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102
3103 return 0;
3104}
3105EXPORT_SYMBOL(hci_register_cb);
3106
3107int hci_unregister_cb(struct hci_cb *cb)
3108{
3109 BT_DBG("%p name %s", cb, cb->name);
3110
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003111 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003113 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114
3115 return 0;
3116}
3117EXPORT_SYMBOL(hci_unregister_cb);
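/* A sketch of how an upper protocol hooks into HCI events: fill in a
 * struct hci_cb and register it. Only the name is shown here; a real
 * user such as L2CAP would also set the confirmation callbacks
 * (security_cfm and friends). The example_* names are illustrative.
 */
#if 0 /* illustrative sketch, not compiled */
static struct hci_cb example_cb = {
	.name = "example",
	/* .security_cfm / .key_change_cfm / .role_switch_cfm as needed */
};

static int __init example_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
	hci_unregister_cb(&example_cb);
}
#endif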
3118
Marcel Holtmann51086992013-10-10 14:54:19 -07003119static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003121 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003123 /* Time stamp */
3124 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003126 /* Send copy to monitor */
3127 hci_send_to_monitor(hdev, skb);
3128
3129 if (atomic_read(&hdev->promisc)) {
3130 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003131 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132 }
3133
3134	/* Get rid of skb owner prior to sending to the driver. */
3135 skb_orphan(skb);
3136
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003137 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003138 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139}
3140
Johan Hedberg3119ae92013-03-05 20:37:44 +02003141void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3142{
3143 skb_queue_head_init(&req->cmd_q);
3144 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003145 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003146}
3147
3148int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3149{
3150 struct hci_dev *hdev = req->hdev;
3151 struct sk_buff *skb;
3152 unsigned long flags;
3153
3154 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3155
Andre Guedes5d73e032013-03-08 11:20:16 -03003156	/* If an error occurred during request building, remove all HCI
3157 * commands queued on the HCI request queue.
3158 */
3159 if (req->err) {
3160 skb_queue_purge(&req->cmd_q);
3161 return req->err;
3162 }
3163
Johan Hedberg3119ae92013-03-05 20:37:44 +02003164 /* Do not allow empty requests */
3165 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003166 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003167
3168 skb = skb_peek_tail(&req->cmd_q);
3169 bt_cb(skb)->req.complete = complete;
3170
3171 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3172 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3173 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3174
3175 queue_work(hdev->workqueue, &hdev->cmd_work);
3176
3177 return 0;
3178}
3179
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003180static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003181 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182{
3183 int len = HCI_COMMAND_HDR_SIZE + plen;
3184 struct hci_command_hdr *hdr;
3185 struct sk_buff *skb;
3186
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003188 if (!skb)
3189 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190
3191 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003192 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193 hdr->plen = plen;
3194
3195 if (plen)
3196 memcpy(skb_put(skb, plen), param, plen);
3197
3198 BT_DBG("skb len %d", skb->len);
3199
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003200 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003201
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003202 return skb;
3203}
3204
3205/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003206int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3207 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003208{
3209 struct sk_buff *skb;
3210
3211 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3212
3213 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3214 if (!skb) {
3215 BT_ERR("%s no memory for command", hdev->name);
3216 return -ENOMEM;
3217 }
3218
Johan Hedberg11714b32013-03-05 20:37:47 +02003219	/* Stand-alone HCI commands must be flagged as
3220 * single-command requests.
3221 */
3222 bt_cb(skb)->req.start = true;
3223
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003225 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226
3227 return 0;
3228}
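/* A sketch of queueing one stand-alone command; hci_cmd_work() below
 * picks it off hdev->cmd_q. HCI_OP_WRITE_SCAN_ENABLE and the SCAN_*
 * bits come from the HCI definitions; the wrapper itself is an
 * illustrative assumption.
 */
#if 0 /* illustrative sketch, not compiled */
static int example_enable_scan(struct hci_dev *hdev)
{
	u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
#endif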
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229
Johan Hedberg71c76a12013-03-05 20:37:46 +02003230/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003231void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3232 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003233{
3234 struct hci_dev *hdev = req->hdev;
3235 struct sk_buff *skb;
3236
3237 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3238
Andre Guedes34739c12013-03-08 11:20:18 -03003239	/* If an error occurred during request building, there is no point in
3240 * queueing the HCI command. We can simply return.
3241 */
3242 if (req->err)
3243 return;
3244
Johan Hedberg71c76a12013-03-05 20:37:46 +02003245 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3246 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003247 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3248 hdev->name, opcode);
3249 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003250 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003251 }
3252
3253 if (skb_queue_empty(&req->cmd_q))
3254 bt_cb(skb)->req.start = true;
3255
Johan Hedberg02350a72013-04-03 21:50:29 +03003256 bt_cb(skb)->req.event = event;
3257
Johan Hedberg71c76a12013-03-05 20:37:46 +02003258 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003259}
3260
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003261void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3262 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003263{
3264 hci_req_add_ev(req, opcode, plen, param, 0);
3265}
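/* A sketch of batching several commands into one asynchronous request
 * using the helpers above; the completion callback fires once the last
 * command in the request completes (see hci_req_cmd_complete() below).
 * The example_* names are illustrative assumptions.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	return hci_req_run(&req, example_complete);
}
#endif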
3266
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003268void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269{
3270 struct hci_command_hdr *hdr;
3271
3272 if (!hdev->sent_cmd)
3273 return NULL;
3274
3275 hdr = (void *) hdev->sent_cmd->data;
3276
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003277 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278 return NULL;
3279
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003280 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281
3282 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3283}
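/* A sketch of recovering the sent command's parameters from an event
 * handler; the return value must be checked since the opcode may not
 * match the last sent command. The example_* name is illustrative.
 */
#if 0 /* illustrative sketch, not compiled */
static void example_scan_enable_complete(struct hci_dev *hdev, u8 status)
{
	u8 *scan = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!scan)
		return;

	BT_DBG("%s scan 0x%2.2x status 0x%2.2x", hdev->name, *scan, status);
}
#endif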
3284
3285/* Send ACL data */
3286static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3287{
3288 struct hci_acl_hdr *hdr;
3289 int len = skb->len;
3290
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003291 skb_push(skb, HCI_ACL_HDR_SIZE);
3292 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003293 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003294 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3295 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296}
3297
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003298static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003299 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003301 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 struct hci_dev *hdev = conn->hdev;
3303 struct sk_buff *list;
3304
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003305 skb->len = skb_headlen(skb);
3306 skb->data_len = 0;
3307
3308 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003309
3310 switch (hdev->dev_type) {
3311 case HCI_BREDR:
3312 hci_add_acl_hdr(skb, conn->handle, flags);
3313 break;
3314 case HCI_AMP:
3315 hci_add_acl_hdr(skb, chan->handle, flags);
3316 break;
3317 default:
3318 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3319 return;
3320 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003321
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003322 list = skb_shinfo(skb)->frag_list;
3323 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324		/* Non-fragmented */
3325 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3326
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003327 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328 } else {
3329 /* Fragmented */
3330 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3331
3332 skb_shinfo(skb)->frag_list = NULL;
3333
3334 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003335 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003337 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003338
3339 flags &= ~ACL_START;
3340 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341 do {
3342 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003343
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003344 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003345 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346
3347 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3348
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003349 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 } while (list);
3351
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003352 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003354}
3355
3356void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3357{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003358 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003359
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003360 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003361
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003362 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003364 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366
3367/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003368void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369{
3370 struct hci_dev *hdev = conn->hdev;
3371 struct hci_sco_hdr hdr;
3372
3373 BT_DBG("%s len %d", hdev->name, skb->len);
3374
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003375 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 hdr.dlen = skb->len;
3377
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003378 skb_push(skb, HCI_SCO_HDR_SIZE);
3379 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003380 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003382 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003383
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003385 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387
3388/* ---- HCI TX task (outgoing data) ---- */
3389
3390/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003391static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3392 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393{
3394 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003395 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003396 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003398 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003400
3401 rcu_read_lock();
3402
3403 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003404 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003405 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003406
3407 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3408 continue;
3409
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410 num++;
3411
3412 if (c->sent < min) {
3413 min = c->sent;
3414 conn = c;
3415 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003416
3417 if (hci_conn_num(hdev, type) == num)
3418 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419 }
3420
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003421 rcu_read_unlock();
3422
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003424 int cnt, q;
3425
3426 switch (conn->type) {
3427 case ACL_LINK:
3428 cnt = hdev->acl_cnt;
3429 break;
3430 case SCO_LINK:
3431 case ESCO_LINK:
3432 cnt = hdev->sco_cnt;
3433 break;
3434 case LE_LINK:
3435 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3436 break;
3437 default:
3438 cnt = 0;
3439 BT_ERR("Unknown link type");
3440 }
3441
3442 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443 *quote = q ? q : 1;
3444 } else
3445 *quote = 0;
3446
3447 BT_DBG("conn %p quote %d", conn, *quote);
3448 return conn;
3449}
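/* Worked example of the quota computed above: for ACL_LINK with
 * hdev->acl_cnt == 9 free packet slots and num == 2 connections that
 * have queued data, q = 9 / 2 = 4, so the least-loaded connection may
 * send up to four packets this round; a zero quotient is rounded up to
 * one so the winner always makes progress.
 */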
3450
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003451static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452{
3453 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003454 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003455
Ville Tervobae1f5d92011-02-10 22:38:53 -03003456 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003458 rcu_read_lock();
3459
Linus Torvalds1da177e2005-04-16 15:20:36 -07003460 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003461 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003462 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003463 BT_ERR("%s killing stalled connection %pMR",
3464 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003465 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466 }
3467 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003468
3469 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470}
3471
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003472static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3473 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003474{
3475 struct hci_conn_hash *h = &hdev->conn_hash;
3476 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003477 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003478 struct hci_conn *conn;
3479 int cnt, q, conn_num = 0;
3480
3481 BT_DBG("%s", hdev->name);
3482
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003483 rcu_read_lock();
3484
3485 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003486 struct hci_chan *tmp;
3487
3488 if (conn->type != type)
3489 continue;
3490
3491 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3492 continue;
3493
3494 conn_num++;
3495
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003496 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003497 struct sk_buff *skb;
3498
3499 if (skb_queue_empty(&tmp->data_q))
3500 continue;
3501
3502 skb = skb_peek(&tmp->data_q);
3503 if (skb->priority < cur_prio)
3504 continue;
3505
3506 if (skb->priority > cur_prio) {
3507 num = 0;
3508 min = ~0;
3509 cur_prio = skb->priority;
3510 }
3511
3512 num++;
3513
3514 if (conn->sent < min) {
3515 min = conn->sent;
3516 chan = tmp;
3517 }
3518 }
3519
3520 if (hci_conn_num(hdev, type) == conn_num)
3521 break;
3522 }
3523
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003524 rcu_read_unlock();
3525
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003526 if (!chan)
3527 return NULL;
3528
3529 switch (chan->conn->type) {
3530 case ACL_LINK:
3531 cnt = hdev->acl_cnt;
3532 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003533 case AMP_LINK:
3534 cnt = hdev->block_cnt;
3535 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003536 case SCO_LINK:
3537 case ESCO_LINK:
3538 cnt = hdev->sco_cnt;
3539 break;
3540 case LE_LINK:
3541 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3542 break;
3543 default:
3544 cnt = 0;
3545 BT_ERR("Unknown link type");
3546 }
3547
3548 q = cnt / num;
3549 *quote = q ? q : 1;
3550 BT_DBG("chan %p quote %d", chan, *quote);
3551 return chan;
3552}
3553
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003554static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3555{
3556 struct hci_conn_hash *h = &hdev->conn_hash;
3557 struct hci_conn *conn;
3558 int num = 0;
3559
3560 BT_DBG("%s", hdev->name);
3561
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003562 rcu_read_lock();
3563
3564 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003565 struct hci_chan *chan;
3566
3567 if (conn->type != type)
3568 continue;
3569
3570 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3571 continue;
3572
3573 num++;
3574
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003575 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003576 struct sk_buff *skb;
3577
3578 if (chan->sent) {
3579 chan->sent = 0;
3580 continue;
3581 }
3582
3583 if (skb_queue_empty(&chan->data_q))
3584 continue;
3585
3586 skb = skb_peek(&chan->data_q);
3587 if (skb->priority >= HCI_PRIO_MAX - 1)
3588 continue;
3589
3590 skb->priority = HCI_PRIO_MAX - 1;
3591
3592 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003593 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003594 }
3595
3596 if (hci_conn_num(hdev, type) == num)
3597 break;
3598 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003599
3600 rcu_read_unlock();
3601
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003602}
3603
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003604static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3605{
3606 /* Calculate count of blocks used by this packet */
3607 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3608}
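/* Worked example: with HCI_ACL_HDR_SIZE == 4 and hdev->block_len == 48,
 * an ACL frame of skb->len == 340 carries 336 payload bytes and so
 * occupies DIV_ROUND_UP(336, 48) == 7 controller buffer blocks.
 */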
3609
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003610static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612 if (!test_bit(HCI_RAW, &hdev->flags)) {
3613 /* ACL tx timeout must be longer than maximum
3614 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003615 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003616 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003617 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003619}
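/* Worked example for the 40.9 second figure: the link supervision
 * timeout is at most 0xffff slots of 0.625 ms, i.e. 65535 * 0.625 ms,
 * or about 40.96 seconds, so the 45 second ACL TX timeout used here
 * stays safely above it.
 */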
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003621static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003622{
3623 unsigned int cnt = hdev->acl_cnt;
3624 struct hci_chan *chan;
3625 struct sk_buff *skb;
3626 int quote;
3627
3628 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003629
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003630 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003631 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003632 u32 priority = (skb_peek(&chan->data_q))->priority;
3633 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003634 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003635 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003636
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003637 /* Stop if priority has changed */
3638 if (skb->priority < priority)
3639 break;
3640
3641 skb = skb_dequeue(&chan->data_q);
3642
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003643 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003644 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003645
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003646 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647 hdev->acl_last_tx = jiffies;
3648
3649 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003650 chan->sent++;
3651 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652 }
3653 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003654
3655 if (cnt != hdev->acl_cnt)
3656 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657}
3658
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003659static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003660{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003661 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003662 struct hci_chan *chan;
3663 struct sk_buff *skb;
3664 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003665 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003666
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003667 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003668
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003669 BT_DBG("%s", hdev->name);
3670
3671 if (hdev->dev_type == HCI_AMP)
3672 type = AMP_LINK;
3673 else
3674 type = ACL_LINK;
3675
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003676 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003677 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003678 u32 priority = (skb_peek(&chan->data_q))->priority;
3679 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3680 int blocks;
3681
3682 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003683 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003684
3685 /* Stop if priority has changed */
3686 if (skb->priority < priority)
3687 break;
3688
3689 skb = skb_dequeue(&chan->data_q);
3690
3691 blocks = __get_blocks(hdev, skb);
3692 if (blocks > hdev->block_cnt)
3693 return;
3694
3695 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003696 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003697
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003698 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003699 hdev->acl_last_tx = jiffies;
3700
3701 hdev->block_cnt -= blocks;
3702 quote -= blocks;
3703
3704 chan->sent += blocks;
3705 chan->conn->sent += blocks;
3706 }
3707 }
3708
3709 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003710 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003711}
3712
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003713static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003714{
3715 BT_DBG("%s", hdev->name);
3716
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003717 /* No ACL link over BR/EDR controller */
3718 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3719 return;
3720
3721 /* No AMP link over AMP controller */
3722 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003723 return;
3724
3725 switch (hdev->flow_ctl_mode) {
3726 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3727 hci_sched_acl_pkt(hdev);
3728 break;
3729
3730 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3731 hci_sched_acl_blk(hdev);
3732 break;
3733 }
3734}
3735
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003737static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738{
3739 struct hci_conn *conn;
3740 struct sk_buff *skb;
3741 int quote;
3742
3743 BT_DBG("%s", hdev->name);
3744
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003745 if (!hci_conn_num(hdev, SCO_LINK))
3746 return;
3747
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3749 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3750 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003751 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003752
3753 conn->sent++;
3754 if (conn->sent == ~0)
3755 conn->sent = 0;
3756 }
3757 }
3758}
3759
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003760static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003761{
3762 struct hci_conn *conn;
3763 struct sk_buff *skb;
3764 int quote;
3765
3766 BT_DBG("%s", hdev->name);
3767
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003768 if (!hci_conn_num(hdev, ESCO_LINK))
3769 return;
3770
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003771 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3772 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003773 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3774 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003775 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003776
3777 conn->sent++;
3778 if (conn->sent == ~0)
3779 conn->sent = 0;
3780 }
3781 }
3782}
3783
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003784static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003785{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003786 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003787 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003788 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003789
3790 BT_DBG("%s", hdev->name);
3791
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003792 if (!hci_conn_num(hdev, LE_LINK))
3793 return;
3794
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003795 if (!test_bit(HCI_RAW, &hdev->flags)) {
3796 /* LE tx timeout must be longer than maximum
3797 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003798 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003799 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003800 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003801 }
3802
3803 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003804 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003805 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003806 u32 priority = (skb_peek(&chan->data_q))->priority;
3807 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003808 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003809 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003810
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003811 /* Stop if priority has changed */
3812 if (skb->priority < priority)
3813 break;
3814
3815 skb = skb_dequeue(&chan->data_q);
3816
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003817 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003818 hdev->le_last_tx = jiffies;
3819
3820 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003821 chan->sent++;
3822 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003823 }
3824 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003825
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003826 if (hdev->le_pkts)
3827 hdev->le_cnt = cnt;
3828 else
3829 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003830
3831 if (cnt != tmp)
3832 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003833}
3834
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003835static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003837 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 struct sk_buff *skb;
3839
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003840 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003841 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842
Marcel Holtmann52de5992013-09-03 18:08:38 -07003843 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3844 /* Schedule queues and send stuff to HCI driver */
3845 hci_sched_acl(hdev);
3846 hci_sched_sco(hdev);
3847 hci_sched_esco(hdev);
3848 hci_sched_le(hdev);
3849 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003850
Linus Torvalds1da177e2005-04-16 15:20:36 -07003851 /* Send next queued raw (unknown type) packet */
3852 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003853 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854}
3855
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003856/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857
3858/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003859static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860{
3861 struct hci_acl_hdr *hdr = (void *) skb->data;
3862 struct hci_conn *conn;
3863 __u16 handle, flags;
3864
3865 skb_pull(skb, HCI_ACL_HDR_SIZE);
3866
3867 handle = __le16_to_cpu(hdr->handle);
3868 flags = hci_flags(handle);
3869 handle = hci_handle(handle);
3870
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003871 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003872 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873
3874 hdev->stat.acl_rx++;
3875
3876 hci_dev_lock(hdev);
3877 conn = hci_conn_hash_lookup_handle(hdev, handle);
3878 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003879
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003881 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003882
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003884 l2cap_recv_acldata(conn, skb, flags);
3885 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003887 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003888 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889 }
3890
3891 kfree_skb(skb);
3892}
3893
3894/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003895static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896{
3897 struct hci_sco_hdr *hdr = (void *) skb->data;
3898 struct hci_conn *conn;
3899 __u16 handle;
3900
3901 skb_pull(skb, HCI_SCO_HDR_SIZE);
3902
3903 handle = __le16_to_cpu(hdr->handle);
3904
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003905 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906
3907 hdev->stat.sco_rx++;
3908
3909 hci_dev_lock(hdev);
3910 conn = hci_conn_hash_lookup_handle(hdev, handle);
3911 hci_dev_unlock(hdev);
3912
3913 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003914 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003915 sco_recv_scodata(conn, skb);
3916 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003918 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003919 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920 }
3921
3922 kfree_skb(skb);
3923}
3924
Johan Hedberg9238f362013-03-05 20:37:48 +02003925static bool hci_req_is_complete(struct hci_dev *hdev)
3926{
3927 struct sk_buff *skb;
3928
3929 skb = skb_peek(&hdev->cmd_q);
3930 if (!skb)
3931 return true;
3932
3933 return bt_cb(skb)->req.start;
3934}
3935
Johan Hedberg42c6b122013-03-05 20:37:49 +02003936static void hci_resend_last(struct hci_dev *hdev)
3937{
3938 struct hci_command_hdr *sent;
3939 struct sk_buff *skb;
3940 u16 opcode;
3941
3942 if (!hdev->sent_cmd)
3943 return;
3944
3945 sent = (void *) hdev->sent_cmd->data;
3946 opcode = __le16_to_cpu(sent->opcode);
3947 if (opcode == HCI_OP_RESET)
3948 return;
3949
3950 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3951 if (!skb)
3952 return;
3953
3954 skb_queue_head(&hdev->cmd_q, skb);
3955 queue_work(hdev->workqueue, &hdev->cmd_work);
3956}
3957
Johan Hedberg9238f362013-03-05 20:37:48 +02003958void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3959{
3960 hci_req_complete_t req_complete = NULL;
3961 struct sk_buff *skb;
3962 unsigned long flags;
3963
3964 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3965
Johan Hedberg42c6b122013-03-05 20:37:49 +02003966 /* If the completed command doesn't match the last one that was
3967 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003968 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003969 if (!hci_sent_cmd_data(hdev, opcode)) {
3970 /* Some CSR based controllers generate a spontaneous
3971 * reset complete event during init and any pending
3972 * command will never be completed. In such a case we
3973 * need to resend whatever was the last sent
3974 * command.
3975 */
3976 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3977 hci_resend_last(hdev);
3978
Johan Hedberg9238f362013-03-05 20:37:48 +02003979 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003980 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003981
3982 /* If the command succeeded and there's still more commands in
3983 * this request the request is not yet complete.
3984 */
3985 if (!status && !hci_req_is_complete(hdev))
3986 return;
3987
3988 /* If this was the last command in a request the complete
3989 * callback would be found in hdev->sent_cmd instead of the
3990 * command queue (hdev->cmd_q).
3991 */
3992 if (hdev->sent_cmd) {
3993 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003994
3995 if (req_complete) {
3996 /* We must set the complete callback to NULL to
3997 * avoid calling the callback more than once if
3998 * this function gets called again.
3999 */
4000 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4001
Johan Hedberg9238f362013-03-05 20:37:48 +02004002 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004003 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004004 }
4005
4006 /* Remove all pending commands belonging to this request */
4007 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4008 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4009 if (bt_cb(skb)->req.start) {
4010 __skb_queue_head(&hdev->cmd_q, skb);
4011 break;
4012 }
4013
4014 req_complete = bt_cb(skb)->req.complete;
4015 kfree_skb(skb);
4016 }
4017 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4018
4019call_complete:
4020 if (req_complete)
4021 req_complete(hdev, status);
4022}
4023
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004024static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004026 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027 struct sk_buff *skb;
4028
4029 BT_DBG("%s", hdev->name);
4030
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004032 /* Send copy to monitor */
4033 hci_send_to_monitor(hdev, skb);
4034
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035 if (atomic_read(&hdev->promisc)) {
4036 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004037 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 }
4039
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004040 if (test_bit(HCI_RAW, &hdev->flags) ||
4041 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042 kfree_skb(skb);
4043 continue;
4044 }
4045
4046 if (test_bit(HCI_INIT, &hdev->flags)) {
4047			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004048 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 case HCI_ACLDATA_PKT:
4050 case HCI_SCODATA_PKT:
4051 kfree_skb(skb);
4052 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004053 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054 }
4055
4056 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004057 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004059 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060 hci_event_packet(hdev, skb);
4061 break;
4062
4063 case HCI_ACLDATA_PKT:
4064 BT_DBG("%s ACL data packet", hdev->name);
4065 hci_acldata_packet(hdev, skb);
4066 break;
4067
4068 case HCI_SCODATA_PKT:
4069 BT_DBG("%s SCO data packet", hdev->name);
4070 hci_scodata_packet(hdev, skb);
4071 break;
4072
4073 default:
4074 kfree_skb(skb);
4075 break;
4076 }
4077 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078}
4079
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004080static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004082 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083 struct sk_buff *skb;
4084
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004085 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4086 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004089 if (atomic_read(&hdev->cmd_cnt)) {
4090 skb = skb_dequeue(&hdev->cmd_q);
4091 if (!skb)
4092 return;
4093
Wei Yongjun7585b972009-02-25 18:29:52 +08004094 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004096 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004097 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004099 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004100 if (test_bit(HCI_RESET, &hdev->flags))
4101 del_timer(&hdev->cmd_timer);
4102 else
4103 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004104 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105 } else {
4106 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004107 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004108 }
4109 }
4110}