/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "Page %u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

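/* The read-only debugfs entries below follow the same seq_file pattern
 * as features_fops above: single_open() binds the corresponding
 * *_show() callback to the hci_dev stashed in inode->i_private, while
 * seq_read/seq_lseek/single_release supply the generic file semantics.
 */
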
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		data5 = get_unaligned_le32(uuid);
		data4 = get_unaligned_le16(uuid + 4);
		data3 = get_unaligned_le16(uuid + 6);
		data2 = get_unaligned_le16(uuid + 8);
		data1 = get_unaligned_le16(uuid + 10);
		data0 = get_unaligned_le32(uuid + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

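/* DEFINE_SIMPLE_ATTRIBUTE() generates the open/read/write fops for a
 * simple u64 attribute from a get/set pair and a printf format. A NULL
 * setter, as for voice_setting_fops above, makes the file effectively
 * read-only.
 */
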
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

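/* These attributes are exposed under debugfs, typically at
 * /sys/kernel/debug/bluetooth/hciX/ (the exact prefix depends on where
 * debugfs is mounted). A hypothetical shell session, assuming that
 * mount point and an hci0 controller:
 *
 *	echo 2000 > /sys/kernel/debug/bluetooth/hci0/idle_timeout
 *	cat /sys/kernel/debug/bluetooth/hci0/sniff_max_interval
 */
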
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

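/* Synchronous requests park the caller on hdev->req_wait_q. The
 * completion callback above (or the cancellation path) updates
 * hdev->req_status and req_result and wakes the waiter, which then
 * translates the result into an errno.
 */
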
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

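/* A minimal (hypothetical) in-kernel usage sketch: issue Read BD_ADDR
 * and wait synchronously for its Command Complete event:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	rp = (struct hci_rp_read_bd_addr *) skb->data;
 *	...
 *	kfree_skb(skb);
 */
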
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

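/* The values above correspond to the Write Inquiry Mode settings from
 * the Bluetooth core specification: 0x00 = standard inquiry result,
 * 0x01 = inquiry result with RSSI, 0x02 = inquiry result with RSSI or
 * extended inquiry result. The manufacturer/revision checks handle
 * controllers known to support RSSI reporting without advertising it
 * in their feature bits.
 */
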
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

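/* Controller bring-up is staged: hci_init1_req resets the controller
 * and reads basic information, hci_init2_req configures BR/EDR and/or
 * LE based on the features learned in stage one, and stages three and
 * four only issue commands whose support bits were read earlier. AMP
 * controllers stop after stage one (see __hci_init below).
 */
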
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

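/* Discovery state machine: STOPPED -> STARTING -> FINDING, optionally
 * RESOLVING while remote names are looked up, then STOPPING back to
 * STOPPED. hci_discovery_set_state() below only emits
 * mgmt_discovering() events on the transitions into FINDING and back
 * to STOPPED.
 */
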
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

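/* The resolve list is kept sorted by ascending |RSSI| (RSSI values are
 * negative dBm), so devices with the strongest signal have their names
 * resolved first; entries already in NAME_PENDING keep their position.
 */
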
Johan Hedberg31754052012-01-04 13:39:52 +02001314bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001315 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316{
Johan Hedberg30883512012-01-04 14:16:21 +02001317 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001318 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001320 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321
Szymon Janc2b2fec42012-11-20 11:38:54 +01001322 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1323
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001324 if (ssp)
1325 *ssp = data->ssp_mode;
1326
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001327 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001328 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001329 if (ie->data.ssp_mode && ssp)
1330 *ssp = true;
1331
Johan Hedberga3d4e202012-01-09 00:53:02 +02001332 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001333 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001334 ie->data.rssi = data->rssi;
1335 hci_inquiry_cache_update_resolve(hdev, ie);
1336 }
1337
Johan Hedberg561aafb2012-01-04 13:31:59 +02001338 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001339 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001340
Johan Hedberg561aafb2012-01-04 13:31:59 +02001341 /* Entry not in the cache. Add new one. */
1342 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1343 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001344 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001345
1346 list_add(&ie->all, &cache->all);
1347
1348 if (name_known) {
1349 ie->name_state = NAME_KNOWN;
1350 } else {
1351 ie->name_state = NAME_NOT_KNOWN;
1352 list_add(&ie->list, &cache->unknown);
1353 }
1354
1355update:
1356 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001357 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001358 ie->name_state = NAME_KNOWN;
1359 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 }
1361
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001362 memcpy(&ie->data, data, sizeof(*data));
1363 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001365
1366 if (ie->name_state == NAME_NOT_KNOWN)
1367 return false;
1368
1369 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370}
1371
1372static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1373{
Johan Hedberg30883512012-01-04 14:16:21 +02001374 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 struct inquiry_info *info = (struct inquiry_info *) buf;
1376 struct inquiry_entry *e;
1377 int copied = 0;
1378
Johan Hedberg561aafb2012-01-04 13:31:59 +02001379 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001381
1382 if (copied >= num)
1383 break;
1384
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 bacpy(&info->bdaddr, &data->bdaddr);
1386 info->pscan_rep_mode = data->pscan_rep_mode;
1387 info->pscan_period_mode = data->pscan_period_mode;
1388 info->pscan_mode = data->pscan_mode;
1389 memcpy(info->dev_class, data->dev_class, 3);
1390 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001391
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001393 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 }
1395
1396 BT_DBG("cache %p, copied %d", cache, copied);
1397 return copied;
1398}
1399
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

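/* Handler for the HCIINQUIRY ioctl. When the inquiry cache is stale,
 * empty or a flush was requested, a fresh inquiry is run synchronously;
 * the cached results are then copied back to user space.
 */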
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

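/* Common power-up path, shared by the HCIDEVUP ioctl and the power_on
 * work item. It holds the request lock across the driver open, the
 * optional vendor setup callback and the controller init sequence, and
 * notifies mgmt once the device is fully up.
 */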
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

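/* Common power-down path. Pending work is flushed and all queues are
 * purged before the driver's close callback runs, so no transmit or
 * receive activity can race with the shutdown.
 */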
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

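/* Handler for the HCIDEVRESET ioctl: drops all queued frames, flushes
 * the inquiry cache and the connection hash, and issues an HCI Reset
 * unless the device is in raw mode.
 */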
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

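	/* For the two MTU ioctls below, dev_opt packs two 16-bit values:
	 * the second __u16 carries the MTU and the first the packet
	 * count.
	 */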
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

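/* Decide whether a link key should be stored persistently or dropped
 * once the connection goes down, based on the key type and on the
 * bonding requirements that both sides declared during pairing.
 */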
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

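/* Look up a stored LTK by its EDiv/Rand pair, typically the values
 * carried in an LE Long Term Key Request event from the controller.
 */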
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

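/* Add or update a BR/EDR link key. When @new_key is set, the key is
 * reported to mgmt, and hci_persistent_key() decides whether user
 * space should store it or flush it when the connection goes down.
 */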
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

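/* Add or update an SMP key (STK or LTK) for the given remote address.
 * Only long term keys are reported to mgmt; short term keys are only
 * valid for the duration of the current connection.
 */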
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

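/* Remote out-of-band pairing data (hash and randomizer) is cached per
 * remote address; the helpers below manage that list.
 */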
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

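/* The blacklist holds remote device addresses that the core should
 * refuse connections from; entries are matched on both the address
 * and the address type.
 */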
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

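/* Completion chain for stopping an LE scan: le_scan_disable_work()
 * below sends the LE scan disable command, and its completion callback
 * either stops discovery (LE-only case) or follows up with a BR/EDR
 * inquiry for interleaved discovery.
 */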
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

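	/* Default sniff interval limits and LE scan parameters are
	 * expressed in units of 0.625 ms baseband slots.
	 */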
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002706
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002707 write_lock(&hci_dev_list_lock);
2708 list_add(&hdev->list, &hci_dev_list);
2709 write_unlock(&hci_dev_list_lock);
2710
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002712 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713
Johan Hedberg19202572013-01-14 22:33:51 +02002714 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002715
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002717
David Herrmann33ca9542011-10-08 14:58:49 +02002718err_wqueue:
2719 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002720 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002721err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002722 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002723
David Herrmann33ca9542011-10-08 14:58:49 +02002724 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725}
2726EXPORT_SYMBOL(hci_register_dev);
2727
2728/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002729void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730{
Sasha Levin3df92b32012-05-27 22:36:56 +02002731 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002732
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002733 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734
Johan Hovold94324962012-03-15 14:48:41 +01002735 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2736
Sasha Levin3df92b32012-05-27 22:36:56 +02002737 id = hdev->id;
2738
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002739 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002741 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742
2743 hci_dev_do_close(hdev);
2744
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302745 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002746 kfree_skb(hdev->reassembly[i]);
2747
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002748 cancel_work_sync(&hdev->power_on);
2749
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002750 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002751 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002752 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002753 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002754 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002755 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002756
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002757 /* mgmt_index_removed should take care of emptying the
2758 * pending list */
2759 BUG_ON(!list_empty(&hdev->mgmt_pending));
2760
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 hci_notify(hdev, HCI_DEV_UNREG);
2762
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002763 if (hdev->rfkill) {
2764 rfkill_unregister(hdev->rfkill);
2765 rfkill_destroy(hdev->rfkill);
2766 }
2767
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002768 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08002769
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002770 debugfs_remove_recursive(hdev->debugfs);
2771
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002772 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002773 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002774
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002775 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002776 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002777 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002778 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002779 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002780 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002781 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002782
David Herrmanndc946bd2012-01-07 15:47:24 +01002783 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002784
2785 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786}
2787EXPORT_SYMBOL(hci_unregister_dev);
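
/* Illustrative sketch, not part of this file: a transport driver's
 * remove() path typically pairs hci_unregister_dev() with hci_free_dev()
 * (defined elsewhere in this file). The wrapper name is hypothetical.
 */
static void example_driver_remove(struct hci_dev *hdev)
{
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}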
2788
2789/* Suspend HCI device */
2790int hci_suspend_dev(struct hci_dev *hdev)
2791{
2792 hci_notify(hdev, HCI_DEV_SUSPEND);
2793 return 0;
2794}
2795EXPORT_SYMBOL(hci_suspend_dev);
2796
2797/* Resume HCI device */
2798int hci_resume_dev(struct hci_dev *hdev)
2799{
2800 hci_notify(hdev, HCI_DEV_RESUME);
2801 return 0;
2802}
2803EXPORT_SYMBOL(hci_resume_dev);
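
/* Illustrative sketch, not part of this file: bus-specific power
 * management hooks usually just forward to the two notifications above.
 * The wrapper names and any surrounding USB/platform plumbing are
 * hypothetical.
 */
static int example_driver_suspend(struct hci_dev *hdev)
{
	return hci_suspend_dev(hdev);
}

static int example_driver_resume(struct hci_dev *hdev)
{
	return hci_resume_dev(hdev);
}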
2804
Marcel Holtmann76bca882009-11-18 00:40:39 +01002805/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002806int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002807{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002808 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002809 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002810 kfree_skb(skb);
2811 return -ENXIO;
2812 }
2813
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002814 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002815 bt_cb(skb)->incoming = 1;
2816
2817 /* Time stamp */
2818 __net_timestamp(skb);
2819
Marcel Holtmann76bca882009-11-18 00:40:39 +01002820 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002821 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002822
Marcel Holtmann76bca882009-11-18 00:40:39 +01002823 return 0;
2824}
2825EXPORT_SYMBOL(hci_recv_frame);
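
/* Illustrative sketch, not part of this file: a driver holding a
 * complete event packet hands it to the core like this. Only
 * hci_recv_frame(), bt_skb_alloc() and the skb helpers are real; the
 * wrapper is hypothetical. Note that hci_recv_frame() consumes the skb
 * even on error.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *buf,
				 int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}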
2826
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302827static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002828 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302829{
2830 int len = 0;
2831 int hlen = 0;
2832 int remain = count;
2833 struct sk_buff *skb;
2834 struct bt_skb_cb *scb;
2835
2836 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002837 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302838 return -EILSEQ;
2839
2840 skb = hdev->reassembly[index];
2841
2842 if (!skb) {
2843 switch (type) {
2844 case HCI_ACLDATA_PKT:
2845 len = HCI_MAX_FRAME_SIZE;
2846 hlen = HCI_ACL_HDR_SIZE;
2847 break;
2848 case HCI_EVENT_PKT:
2849 len = HCI_MAX_EVENT_SIZE;
2850 hlen = HCI_EVENT_HDR_SIZE;
2851 break;
2852 case HCI_SCODATA_PKT:
2853 len = HCI_MAX_SCO_SIZE;
2854 hlen = HCI_SCO_HDR_SIZE;
2855 break;
2856 }
2857
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002858 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302859 if (!skb)
2860 return -ENOMEM;
2861
2862 scb = (void *) skb->cb;
2863 scb->expect = hlen;
2864 scb->pkt_type = type;
2865
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302866 hdev->reassembly[index] = skb;
2867 }
2868
2869 while (count) {
2870 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002871 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302872
2873 memcpy(skb_put(skb, len), data, len);
2874
2875 count -= len;
2876 data += len;
2877 scb->expect -= len;
2878 remain = count;
2879
2880 switch (type) {
2881 case HCI_EVENT_PKT:
2882 if (skb->len == HCI_EVENT_HDR_SIZE) {
2883 struct hci_event_hdr *h = hci_event_hdr(skb);
2884 scb->expect = h->plen;
2885
2886 if (skb_tailroom(skb) < scb->expect) {
2887 kfree_skb(skb);
2888 hdev->reassembly[index] = NULL;
2889 return -ENOMEM;
2890 }
2891 }
2892 break;
2893
2894 case HCI_ACLDATA_PKT:
2895 if (skb->len == HCI_ACL_HDR_SIZE) {
2896 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2897 scb->expect = __le16_to_cpu(h->dlen);
2898
2899 if (skb_tailroom(skb) < scb->expect) {
2900 kfree_skb(skb);
2901 hdev->reassembly[index] = NULL;
2902 return -ENOMEM;
2903 }
2904 }
2905 break;
2906
2907 case HCI_SCODATA_PKT:
2908 if (skb->len == HCI_SCO_HDR_SIZE) {
2909 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2910 scb->expect = h->dlen;
2911
2912 if (skb_tailroom(skb) < scb->expect) {
2913 kfree_skb(skb);
2914 hdev->reassembly[index] = NULL;
2915 return -ENOMEM;
2916 }
2917 }
2918 break;
2919 }
2920
2921 if (scb->expect == 0) {
2922 /* Complete frame */
2923
2924 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002925 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302926
2927 hdev->reassembly[index] = NULL;
2928 return remain;
2929 }
2930 }
2931
2932 return remain;
2933}
2934
Marcel Holtmannef222012007-07-11 06:42:04 +02002935int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2936{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302937 int rem = 0;
2938
Marcel Holtmannef222012007-07-11 06:42:04 +02002939 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2940 return -EILSEQ;
2941
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002942 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002943 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302944 if (rem < 0)
2945 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002946
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302947 data += (count - rem);
2948 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002949 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002950
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302951 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002952}
2953EXPORT_SYMBOL(hci_recv_fragment);
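
/* Illustrative sketch, not part of this file: a transport that tags
 * each RX buffer with its packet type can feed arbitrary-sized chunks
 * and let hci_reassembly() above rebuild complete frames. The wrapper
 * is hypothetical.
 */
static void example_rx_typed_chunk(struct hci_dev *hdev, int type,
				   void *buf, int len)
{
	if (hci_recv_fragment(hdev, type, buf, len) < 0)
		BT_ERR("%s frame reassembly failed", hdev->name);
}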
2954
Suraj Sumangala99811512010-07-14 13:02:19 +05302955#define STREAM_REASSEMBLY 0
2956
2957int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2958{
2959 int type;
2960 int rem = 0;
2961
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002962 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302963 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2964
2965 if (!skb) {
2966 struct { char type; } *pkt;
2967
2968 /* Start of the frame */
2969 pkt = data;
2970 type = pkt->type;
2971
2972 data++;
2973 count--;
2974 } else
2975 type = bt_cb(skb)->pkt_type;
2976
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002977 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002978 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302979 if (rem < 0)
2980 return rem;
2981
2982 data += (count - rem);
2983 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002984 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302985
2986 return rem;
2987}
2988EXPORT_SYMBOL(hci_recv_stream_fragment);
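
/* Illustrative sketch, not part of this file: an H4-style UART driver
 * can push its raw byte stream, in which every frame starts with a
 * packet-type octet, straight into the core. The wrapper is
 * hypothetical.
 */
static void example_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
	if (hci_recv_stream_fragment(hdev, buf, len) < 0)
		BT_ERR("%s corrupted stream, dropping data", hdev->name);
}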
2989
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990/* ---- Interface to upper protocols ---- */
2991
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992int hci_register_cb(struct hci_cb *cb)
2993{
2994 BT_DBG("%p name %s", cb, cb->name);
2995
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002996 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002998 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999
3000 return 0;
3001}
3002EXPORT_SYMBOL(hci_register_cb);
3003
3004int hci_unregister_cb(struct hci_cb *cb)
3005{
3006 BT_DBG("%p name %s", cb, cb->name);
3007
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003008 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003010 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
3012 return 0;
3013}
3014EXPORT_SYMBOL(hci_unregister_cb);
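
/* Illustrative sketch, not part of this file: an upper protocol
 * registers a callback table once at module init. Only .name is shown
 * here; a real user also fills in the per-event hooks declared in
 * hci_core.h. The protocol name is hypothetical.
 */
static struct hci_cb example_cb = {
	.name = "example_proto",
};

static int __init example_proto_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_proto_exit(void)
{
	hci_unregister_cb(&example_cb);
}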
3015
Marcel Holtmann51086992013-10-10 14:54:19 -07003016static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003018 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003020 /* Time stamp */
3021 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003023 /* Send copy to monitor */
3024 hci_send_to_monitor(hdev, skb);
3025
3026 if (atomic_read(&hdev->promisc)) {
3027 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003028 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029 }
3030
3031 /* Get rid of skb owner, prior to sending to the driver. */
3032 skb_orphan(skb);
3033
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003034 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003035 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036}
3037
Johan Hedberg3119ae92013-03-05 20:37:44 +02003038void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3039{
3040 skb_queue_head_init(&req->cmd_q);
3041 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003042 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003043}
3044
3045int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3046{
3047 struct hci_dev *hdev = req->hdev;
3048 struct sk_buff *skb;
3049 unsigned long flags;
3050
3051 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3052
Andre Guedes5d73e032013-03-08 11:20:16 -03003053	/* If an error occurred during request building, remove all HCI
3054 * commands queued on the HCI request queue.
3055 */
3056 if (req->err) {
3057 skb_queue_purge(&req->cmd_q);
3058 return req->err;
3059 }
3060
Johan Hedberg3119ae92013-03-05 20:37:44 +02003061 /* Do not allow empty requests */
3062 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003063 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003064
3065 skb = skb_peek_tail(&req->cmd_q);
3066 bt_cb(skb)->req.complete = complete;
3067
3068 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3069 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3070 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3071
3072 queue_work(hdev->workqueue, &hdev->cmd_work);
3073
3074 return 0;
3075}
3076
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003077static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003078 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079{
3080 int len = HCI_COMMAND_HDR_SIZE + plen;
3081 struct hci_command_hdr *hdr;
3082 struct sk_buff *skb;
3083
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003085 if (!skb)
3086 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003087
3088 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003089 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 hdr->plen = plen;
3091
3092 if (plen)
3093 memcpy(skb_put(skb, plen), param, plen);
3094
3095 BT_DBG("skb len %d", skb->len);
3096
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003097 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003098
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003099 return skb;
3100}
3101
3102/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003103int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3104 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003105{
3106 struct sk_buff *skb;
3107
3108 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3109
3110 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3111 if (!skb) {
3112 BT_ERR("%s no memory for command", hdev->name);
3113 return -ENOMEM;
3114 }
3115
Johan Hedberg11714b32013-03-05 20:37:47 +02003116	/* Stand-alone HCI commands must be flagged as
3117 * single-command requests.
3118 */
3119 bt_cb(skb)->req.start = true;
3120
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003122 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003123
3124 return 0;
3125}
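
/* Illustrative sketch, not part of this file: a parameter-less command
 * such as HCI_OP_RESET passes plen 0 and a NULL payload; a command
 * with parameters would pass a packed struct and its size instead.
 */
static int example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}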
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126
Johan Hedberg71c76a12013-03-05 20:37:46 +02003127/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003128void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3129 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003130{
3131 struct hci_dev *hdev = req->hdev;
3132 struct sk_buff *skb;
3133
3134 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3135
Andre Guedes34739c12013-03-08 11:20:18 -03003136	/* If an error occurred during request building, there is no point in
3137 * queueing the HCI command. We can simply return.
3138 */
3139 if (req->err)
3140 return;
3141
Johan Hedberg71c76a12013-03-05 20:37:46 +02003142 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3143 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003144 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3145 hdev->name, opcode);
3146 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003147 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003148 }
3149
3150 if (skb_queue_empty(&req->cmd_q))
3151 bt_cb(skb)->req.start = true;
3152
Johan Hedberg02350a72013-04-03 21:50:29 +03003153 bt_cb(skb)->req.event = event;
3154
Johan Hedberg71c76a12013-03-05 20:37:46 +02003155 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003156}
3157
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003158void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3159 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003160{
3161 hci_req_add_ev(req, opcode, plen, param, 0);
3162}
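
/* Illustrative sketch, not part of this file: building and running an
 * asynchronous request. The completion callback fires once for the
 * whole request; hci_req_run() returns -ENODATA for an empty request
 * and the builder's saved error if any hci_req_add() failed. The
 * callback and the command choice are hypothetical.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request completed, status 0x%2.2x", hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}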
3163
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003165void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166{
3167 struct hci_command_hdr *hdr;
3168
3169 if (!hdev->sent_cmd)
3170 return NULL;
3171
3172 hdr = (void *) hdev->sent_cmd->data;
3173
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003174 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175 return NULL;
3176
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003177 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178
3179 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3180}
3181
3182/* Send ACL data */
3183static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3184{
3185 struct hci_acl_hdr *hdr;
3186 int len = skb->len;
3187
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003188 skb_push(skb, HCI_ACL_HDR_SIZE);
3189 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003190 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003191 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3192 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193}
3194
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003195static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003196 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003198 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199 struct hci_dev *hdev = conn->hdev;
3200 struct sk_buff *list;
3201
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003202 skb->len = skb_headlen(skb);
3203 skb->data_len = 0;
3204
3205 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003206
3207 switch (hdev->dev_type) {
3208 case HCI_BREDR:
3209 hci_add_acl_hdr(skb, conn->handle, flags);
3210 break;
3211 case HCI_AMP:
3212 hci_add_acl_hdr(skb, chan->handle, flags);
3213 break;
3214 default:
3215 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3216 return;
3217 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003218
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003219 list = skb_shinfo(skb)->frag_list;
3220 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221		/* Non-fragmented */
3222 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3223
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003224 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 } else {
3226 /* Fragmented */
3227 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3228
3229 skb_shinfo(skb)->frag_list = NULL;
3230
3231 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003232 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003234 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003235
3236 flags &= ~ACL_START;
3237 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238 do {
3239 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003240
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003241 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003242 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243
3244 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3245
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003246 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247 } while (list);
3248
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003249 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003251}
3252
3253void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3254{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003255 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003256
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003257 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003258
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003259 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003261 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263
3264/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003265void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266{
3267 struct hci_dev *hdev = conn->hdev;
3268 struct hci_sco_hdr hdr;
3269
3270 BT_DBG("%s len %d", hdev->name, skb->len);
3271
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003272 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273 hdr.dlen = skb->len;
3274
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003275 skb_push(skb, HCI_SCO_HDR_SIZE);
3276 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003277 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003279 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003280
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003282 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283}
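
/* Illustrative sketch, not part of this file: a voice path allocates an
 * skb per audio frame, copies the payload and queues it on the SCO
 * connection. The wrapper is hypothetical, and a real caller keeps the
 * payload within the negotiated SCO MTU.
 */
static int example_send_voice(struct hci_conn *conn, const void *frame,
			      int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), frame, len);
	hci_send_sco(conn, skb);

	return 0;
}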
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284
3285/* ---- HCI TX task (outgoing data) ---- */
3286
3287/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003288static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3289 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290{
3291 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003292 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003293 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003295 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003297
3298 rcu_read_lock();
3299
3300 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003301 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003303
3304 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3305 continue;
3306
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 num++;
3308
3309 if (c->sent < min) {
3310 min = c->sent;
3311 conn = c;
3312 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003313
3314 if (hci_conn_num(hdev, type) == num)
3315 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316 }
3317
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003318 rcu_read_unlock();
3319
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003321 int cnt, q;
3322
3323 switch (conn->type) {
3324 case ACL_LINK:
3325 cnt = hdev->acl_cnt;
3326 break;
3327 case SCO_LINK:
3328 case ESCO_LINK:
3329 cnt = hdev->sco_cnt;
3330 break;
3331 case LE_LINK:
3332 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3333 break;
3334 default:
3335 cnt = 0;
3336 BT_ERR("Unknown link type");
3337 }
3338
3339 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340 *quote = q ? q : 1;
3341 } else
3342 *quote = 0;
3343
3344 BT_DBG("conn %p quote %d", conn, *quote);
3345 return conn;
3346}
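
/* Worked example, illustrative: with hdev->acl_cnt == 8 free buffers
 * and num == 3 busy ACL connections, the quota is q = 8 / 3 = 2 packets
 * for the least-recently-served connection; a zero quotient is rounded
 * up to 1 so a connection can always make progress.
 */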
3347
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003348static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349{
3350 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003351 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352
Ville Tervobae1f5d92011-02-10 22:38:53 -03003353 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003355 rcu_read_lock();
3356
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003358 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003359 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003360 BT_ERR("%s killing stalled connection %pMR",
3361 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003362 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 }
3364 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003365
3366 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367}
3368
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003369static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3370 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003371{
3372 struct hci_conn_hash *h = &hdev->conn_hash;
3373 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003374 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003375 struct hci_conn *conn;
3376 int cnt, q, conn_num = 0;
3377
3378 BT_DBG("%s", hdev->name);
3379
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003380 rcu_read_lock();
3381
3382 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003383 struct hci_chan *tmp;
3384
3385 if (conn->type != type)
3386 continue;
3387
3388 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3389 continue;
3390
3391 conn_num++;
3392
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003393 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003394 struct sk_buff *skb;
3395
3396 if (skb_queue_empty(&tmp->data_q))
3397 continue;
3398
3399 skb = skb_peek(&tmp->data_q);
3400 if (skb->priority < cur_prio)
3401 continue;
3402
3403 if (skb->priority > cur_prio) {
3404 num = 0;
3405 min = ~0;
3406 cur_prio = skb->priority;
3407 }
3408
3409 num++;
3410
3411 if (conn->sent < min) {
3412 min = conn->sent;
3413 chan = tmp;
3414 }
3415 }
3416
3417 if (hci_conn_num(hdev, type) == conn_num)
3418 break;
3419 }
3420
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003421 rcu_read_unlock();
3422
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003423 if (!chan)
3424 return NULL;
3425
3426 switch (chan->conn->type) {
3427 case ACL_LINK:
3428 cnt = hdev->acl_cnt;
3429 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003430 case AMP_LINK:
3431 cnt = hdev->block_cnt;
3432 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003433 case SCO_LINK:
3434 case ESCO_LINK:
3435 cnt = hdev->sco_cnt;
3436 break;
3437 case LE_LINK:
3438 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3439 break;
3440 default:
3441 cnt = 0;
3442 BT_ERR("Unknown link type");
3443 }
3444
3445 q = cnt / num;
3446 *quote = q ? q : 1;
3447 BT_DBG("chan %p quote %d", chan, *quote);
3448 return chan;
3449}
3450
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003451static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3452{
3453 struct hci_conn_hash *h = &hdev->conn_hash;
3454 struct hci_conn *conn;
3455 int num = 0;
3456
3457 BT_DBG("%s", hdev->name);
3458
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003459 rcu_read_lock();
3460
3461 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003462 struct hci_chan *chan;
3463
3464 if (conn->type != type)
3465 continue;
3466
3467 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3468 continue;
3469
3470 num++;
3471
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003472 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003473 struct sk_buff *skb;
3474
3475 if (chan->sent) {
3476 chan->sent = 0;
3477 continue;
3478 }
3479
3480 if (skb_queue_empty(&chan->data_q))
3481 continue;
3482
3483 skb = skb_peek(&chan->data_q);
3484 if (skb->priority >= HCI_PRIO_MAX - 1)
3485 continue;
3486
3487 skb->priority = HCI_PRIO_MAX - 1;
3488
3489 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003490 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003491 }
3492
3493 if (hci_conn_num(hdev, type) == num)
3494 break;
3495 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003496
3497 rcu_read_unlock();
3498
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003499}
3500
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003501static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3502{
3503 /* Calculate count of blocks used by this packet */
3504 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3505}
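
/* Worked example, illustrative: with hdev->block_len == 64, a 343-byte
 * skb (4-byte ACL header plus 339 bytes of payload) occupies
 * DIV_ROUND_UP(343 - 4, 64) = 6 controller data blocks.
 */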
3506
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003507static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509 if (!test_bit(HCI_RAW, &hdev->flags)) {
3510 /* ACL tx timeout must be longer than maximum
3511 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003512 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003513 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003514 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003516}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003518static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003519{
3520 unsigned int cnt = hdev->acl_cnt;
3521 struct hci_chan *chan;
3522 struct sk_buff *skb;
3523 int quote;
3524
3525 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003526
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003527 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003528 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003529 u32 priority = (skb_peek(&chan->data_q))->priority;
3530 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003531 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003532 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003533
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003534 /* Stop if priority has changed */
3535 if (skb->priority < priority)
3536 break;
3537
3538 skb = skb_dequeue(&chan->data_q);
3539
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003540 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003541 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003542
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003543 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544 hdev->acl_last_tx = jiffies;
3545
3546 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003547 chan->sent++;
3548 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549 }
3550 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003551
3552 if (cnt != hdev->acl_cnt)
3553 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554}
3555
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003556static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003557{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003558 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003559 struct hci_chan *chan;
3560 struct sk_buff *skb;
3561 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003562 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003563
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003564 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003565
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003566 BT_DBG("%s", hdev->name);
3567
3568 if (hdev->dev_type == HCI_AMP)
3569 type = AMP_LINK;
3570 else
3571 type = ACL_LINK;
3572
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003573 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003574 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003575 u32 priority = (skb_peek(&chan->data_q))->priority;
3576 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3577 int blocks;
3578
3579 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003580 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003581
3582 /* Stop if priority has changed */
3583 if (skb->priority < priority)
3584 break;
3585
3586 skb = skb_dequeue(&chan->data_q);
3587
3588 blocks = __get_blocks(hdev, skb);
3589 if (blocks > hdev->block_cnt)
3590 return;
3591
3592 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003593 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003594
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003595 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003596 hdev->acl_last_tx = jiffies;
3597
3598 hdev->block_cnt -= blocks;
3599 quote -= blocks;
3600
3601 chan->sent += blocks;
3602 chan->conn->sent += blocks;
3603 }
3604 }
3605
3606 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003607 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003608}
3609
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003610static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003611{
3612 BT_DBG("%s", hdev->name);
3613
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003614 /* No ACL link over BR/EDR controller */
3615 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3616 return;
3617
3618 /* No AMP link over AMP controller */
3619 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003620 return;
3621
3622 switch (hdev->flow_ctl_mode) {
3623 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3624 hci_sched_acl_pkt(hdev);
3625 break;
3626
3627 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3628 hci_sched_acl_blk(hdev);
3629 break;
3630 }
3631}
3632
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003634static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003635{
3636 struct hci_conn *conn;
3637 struct sk_buff *skb;
3638 int quote;
3639
3640 BT_DBG("%s", hdev->name);
3641
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003642 if (!hci_conn_num(hdev, SCO_LINK))
3643 return;
3644
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3646 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3647 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003648 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003649
3650 conn->sent++;
3651 if (conn->sent == ~0)
3652 conn->sent = 0;
3653 }
3654 }
3655}
3656
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003657static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003658{
3659 struct hci_conn *conn;
3660 struct sk_buff *skb;
3661 int quote;
3662
3663 BT_DBG("%s", hdev->name);
3664
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003665 if (!hci_conn_num(hdev, ESCO_LINK))
3666 return;
3667
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003668 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3669 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003670 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3671 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003672 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003673
3674 conn->sent++;
3675 if (conn->sent == ~0)
3676 conn->sent = 0;
3677 }
3678 }
3679}
3680
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003681static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003682{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003683 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003684 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003685 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003686
3687 BT_DBG("%s", hdev->name);
3688
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003689 if (!hci_conn_num(hdev, LE_LINK))
3690 return;
3691
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003692 if (!test_bit(HCI_RAW, &hdev->flags)) {
3693 /* LE tx timeout must be longer than maximum
3694 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003695 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003696 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003697 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003698 }
3699
3700 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003701 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003702 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003703 u32 priority = (skb_peek(&chan->data_q))->priority;
3704 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003705 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003706 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003707
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003708 /* Stop if priority has changed */
3709 if (skb->priority < priority)
3710 break;
3711
3712 skb = skb_dequeue(&chan->data_q);
3713
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003714 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003715 hdev->le_last_tx = jiffies;
3716
3717 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003718 chan->sent++;
3719 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003720 }
3721 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003722
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003723 if (hdev->le_pkts)
3724 hdev->le_cnt = cnt;
3725 else
3726 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003727
3728 if (cnt != tmp)
3729 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003730}
3731
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003732static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003734 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735 struct sk_buff *skb;
3736
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003737 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003738 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739
Marcel Holtmann52de5992013-09-03 18:08:38 -07003740 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3741 /* Schedule queues and send stuff to HCI driver */
3742 hci_sched_acl(hdev);
3743 hci_sched_sco(hdev);
3744 hci_sched_esco(hdev);
3745 hci_sched_le(hdev);
3746 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003747
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748 /* Send next queued raw (unknown type) packet */
3749 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003750 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751}
3752
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003753/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003754
3755/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003756static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757{
3758 struct hci_acl_hdr *hdr = (void *) skb->data;
3759 struct hci_conn *conn;
3760 __u16 handle, flags;
3761
3762 skb_pull(skb, HCI_ACL_HDR_SIZE);
3763
3764 handle = __le16_to_cpu(hdr->handle);
3765 flags = hci_flags(handle);
3766 handle = hci_handle(handle);
3767
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003768 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003769 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770
3771 hdev->stat.acl_rx++;
3772
3773 hci_dev_lock(hdev);
3774 conn = hci_conn_hash_lookup_handle(hdev, handle);
3775 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003776
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003778 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003779
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003781 l2cap_recv_acldata(conn, skb, flags);
3782 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003784 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003785 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786 }
3787
3788 kfree_skb(skb);
3789}
3790
3791/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003792static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793{
3794 struct hci_sco_hdr *hdr = (void *) skb->data;
3795 struct hci_conn *conn;
3796 __u16 handle;
3797
3798 skb_pull(skb, HCI_SCO_HDR_SIZE);
3799
3800 handle = __le16_to_cpu(hdr->handle);
3801
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003802 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803
3804 hdev->stat.sco_rx++;
3805
3806 hci_dev_lock(hdev);
3807 conn = hci_conn_hash_lookup_handle(hdev, handle);
3808 hci_dev_unlock(hdev);
3809
3810 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003812 sco_recv_scodata(conn, skb);
3813 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003815 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003816 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817 }
3818
3819 kfree_skb(skb);
3820}
3821
Johan Hedberg9238f362013-03-05 20:37:48 +02003822static bool hci_req_is_complete(struct hci_dev *hdev)
3823{
3824 struct sk_buff *skb;
3825
3826 skb = skb_peek(&hdev->cmd_q);
3827 if (!skb)
3828 return true;
3829
3830 return bt_cb(skb)->req.start;
3831}
3832
Johan Hedberg42c6b122013-03-05 20:37:49 +02003833static void hci_resend_last(struct hci_dev *hdev)
3834{
3835 struct hci_command_hdr *sent;
3836 struct sk_buff *skb;
3837 u16 opcode;
3838
3839 if (!hdev->sent_cmd)
3840 return;
3841
3842 sent = (void *) hdev->sent_cmd->data;
3843 opcode = __le16_to_cpu(sent->opcode);
3844 if (opcode == HCI_OP_RESET)
3845 return;
3846
3847 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3848 if (!skb)
3849 return;
3850
3851 skb_queue_head(&hdev->cmd_q, skb);
3852 queue_work(hdev->workqueue, &hdev->cmd_work);
3853}
3854
Johan Hedberg9238f362013-03-05 20:37:48 +02003855void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3856{
3857 hci_req_complete_t req_complete = NULL;
3858 struct sk_buff *skb;
3859 unsigned long flags;
3860
3861 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3862
Johan Hedberg42c6b122013-03-05 20:37:49 +02003863 /* If the completed command doesn't match the last one that was
3864	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003865 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003866 if (!hci_sent_cmd_data(hdev, opcode)) {
3867		/* Some CSR-based controllers generate a spontaneous
3868		 * reset complete event during init, and any pending
3869 * command will never be completed. In such a case we
3870 * need to resend whatever was the last sent
3871 * command.
3872 */
3873 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3874 hci_resend_last(hdev);
3875
Johan Hedberg9238f362013-03-05 20:37:48 +02003876 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003877 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003878
3879 /* If the command succeeded and there's still more commands in
3880 * this request the request is not yet complete.
3881 */
3882 if (!status && !hci_req_is_complete(hdev))
3883 return;
3884
3885 /* If this was the last command in a request the complete
3886 * callback would be found in hdev->sent_cmd instead of the
3887 * command queue (hdev->cmd_q).
3888 */
3889 if (hdev->sent_cmd) {
3890 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003891
3892 if (req_complete) {
3893 /* We must set the complete callback to NULL to
3894 * avoid calling the callback more than once if
3895 * this function gets called again.
3896 */
3897 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3898
Johan Hedberg9238f362013-03-05 20:37:48 +02003899 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003900 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003901 }
3902
3903 /* Remove all pending commands belonging to this request */
3904 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3905 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3906 if (bt_cb(skb)->req.start) {
3907 __skb_queue_head(&hdev->cmd_q, skb);
3908 break;
3909 }
3910
3911 req_complete = bt_cb(skb)->req.complete;
3912 kfree_skb(skb);
3913 }
3914 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3915
3916call_complete:
3917 if (req_complete)
3918 req_complete(hdev, status);
3919}
3920
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003921static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003923 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924 struct sk_buff *skb;
3925
3926 BT_DBG("%s", hdev->name);
3927
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003929 /* Send copy to monitor */
3930 hci_send_to_monitor(hdev, skb);
3931
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 if (atomic_read(&hdev->promisc)) {
3933 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003934 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 }
3936
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003937 if (test_bit(HCI_RAW, &hdev->flags) ||
3938 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939 kfree_skb(skb);
3940 continue;
3941 }
3942
3943 if (test_bit(HCI_INIT, &hdev->flags)) {
3944			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003945 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003946 case HCI_ACLDATA_PKT:
3947 case HCI_SCODATA_PKT:
3948 kfree_skb(skb);
3949 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003950 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 }
3952
3953 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003954 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003956 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957 hci_event_packet(hdev, skb);
3958 break;
3959
3960 case HCI_ACLDATA_PKT:
3961 BT_DBG("%s ACL data packet", hdev->name);
3962 hci_acldata_packet(hdev, skb);
3963 break;
3964
3965 case HCI_SCODATA_PKT:
3966 BT_DBG("%s SCO data packet", hdev->name);
3967 hci_scodata_packet(hdev, skb);
3968 break;
3969
3970 default:
3971 kfree_skb(skb);
3972 break;
3973 }
3974 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975}
3976
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003977static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003979 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003980 struct sk_buff *skb;
3981
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003982 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3983 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003986 if (atomic_read(&hdev->cmd_cnt)) {
3987 skb = skb_dequeue(&hdev->cmd_q);
3988 if (!skb)
3989 return;
3990
Wei Yongjun7585b972009-02-25 18:29:52 +08003991 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003993 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003994 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003996 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003997 if (test_bit(HCI_RESET, &hdev->flags))
3998 del_timer(&hdev->cmd_timer);
3999 else
4000 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004001 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004002 } else {
4003 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004004 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005 }
4006 }
4007}