/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

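/* Each read-only entry below follows the same seq_file pattern: a
 * *_show() callback renders the state under hci_dev_lock(), and
 * single_open() binds it to the hci_dev passed via inode->i_private.
 * The files are created from __hci_init(); assuming debugfs is mounted
 * at its usual location, they show up under
 * /sys/kernel/debug/bluetooth/hciX/ and can simply be read, e.g.
 * "cat /sys/kernel/debug/bluetooth/hci0/features".
 */
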
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "Page %u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;
		u8 *val = uuid->uuid;

		/* Decode from the raw 16-byte UUID value; pointer
		 * arithmetic on the struct bt_uuid pointer itself would
		 * step in units of the whole struct rather than bytes.
		 */
		data5 = get_unaligned_le32(val);
		data4 = get_unaligned_le16(val + 4);
		data3 = get_unaligned_le16(val + 6);
		data2 = get_unaligned_le16(val + 8);
		data1 = get_unaligned_le16(val + 10);
		data0 = get_unaligned_le32(val + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

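/* DEFINE_SIMPLE_ATTRIBUTE() above generates the file_operations from a
 * u64 getter, an optional u64 setter and a printf-style format string;
 * passing NULL as the setter (as done for voice_setting_fops) makes the
 * debugfs entry read-only, while the read-write attributes below supply
 * both callbacks.
 */
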
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

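	/* The timeout is expressed in milliseconds: 0 disables the idle
	 * timer, any other value must fall between 0.5 seconds and one
	 * hour (enforced below).
	 */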
	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

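	/* Sniff intervals are kept in baseband slots (0.625 ms each),
	 * hence the value must be even and non-zero; the minimum may
	 * never exceed the current maximum.
	 */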
	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

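/* Usage sketch (illustrative only): with the request lock held, a
 * caller can issue a single command and receive the matching Command
 * Complete skb synchronously:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */
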
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

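/* Usage sketch (illustrative only): callers hand in a request builder
 * plus an opaque option word, e.g. the HCISETSCAN ioctl path boils
 * down to roughly:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */
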
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

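/* Controller bring-up is staged (see __hci_init() below): hci_init1_req
 * resets the controller and reads its basic identity, hci_init2_req does
 * the BR/EDR and LE setup, hci_init3_req issues settings that depend on
 * the supported-commands bitmap, and hci_init4_req takes care of page 2
 * event masks and Synchronization Train parameters.
 */
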
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

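/* Pick the richest inquiry result format the controller can deliver:
 * 0x02 selects inquiry results with extended data, 0x01 results with
 * RSSI, and 0x00 the standard format. The manufacturer/revision checks
 * below whitelist a few controllers that handle the RSSI format without
 * advertising the corresponding feature bit.
 */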
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However, some controllers list
		 * max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force max_page to
		 * at least 1.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If the Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If the Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send the command only if it is
	 * marked as supported. If not supported, assume that the controller
	 * does not have actual support for stored link keys, which makes
	 * this command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The HCI_BREDR device type covers single-mode LE, single-mode
	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
	 * only need the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

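/* Device discovery runs through a small state machine: STOPPED ->
 * STARTING -> FINDING (inquiry/scanning active) -> optionally RESOLVING
 * (remote name requests) -> STOPPING -> STOPPED. The mgmt "discovering"
 * events are emitted on the transitions handled in
 * hci_discovery_set_state() below.
 */
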
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

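/* Keep the resolve list ordered by signal strength, strongest first:
 * RSSI values are negative, so a smaller absolute value means a
 * stronger signal. Entries whose name resolution is already in
 * progress (NAME_PENDING) are never displaced.
 */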
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

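/* Backend of the HCIINQUIRY ioctl. A rough userspace sketch
 * (illustrative only, error handling omitted):
 *
 *	struct hci_inquiry_req *ir;
 *	ir = malloc(sizeof(*ir) + 255 * sizeof(struct inquiry_info));
 *	ir->dev_id = 0;
 *	ir->flags = IREQ_CACHE_FLUSH;
 *	ir->lap[0] = 0x33; ir->lap[1] = 0x8b; ir->lap[2] = 0x9e;
 *	ir->length = 8;
 *	ir->num_rsp = 0;
 *	ioctl(hci_sock, HCIINQUIRY, ir);
 */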
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394int hci_inquiry(void __user *arg)
1395{
1396 __u8 __user *ptr = arg;
1397 struct hci_inquiry_req ir;
1398 struct hci_dev *hdev;
1399 int err = 0, do_inquiry = 0, max_rsp;
1400 long timeo;
1401 __u8 *buf;
1402
1403 if (copy_from_user(&ir, ptr, sizeof(ir)))
1404 return -EFAULT;
1405
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001406 hdev = hci_dev_get(ir.dev_id);
1407 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 return -ENODEV;
1409
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001410 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1411 err = -EBUSY;
1412 goto done;
1413 }
1414
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001415 if (hdev->dev_type != HCI_BREDR) {
1416 err = -EOPNOTSUPP;
1417 goto done;
1418 }
1419
Johan Hedberg56f87902013-10-02 13:43:13 +03001420 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1421 err = -EOPNOTSUPP;
1422 goto done;
1423 }
1424
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001425 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001426 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001427 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001428 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 do_inquiry = 1;
1430 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001431 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
Marcel Holtmann04837f62006-07-03 10:02:33 +02001433 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001434
1435 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001436 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1437 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001438 if (err < 0)
1439 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001440
1441 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1442 * cleared). If it is interrupted by a signal, return -EINTR.
1443 */
1444 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1445 TASK_INTERRUPTIBLE))
1446 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001447 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001449 /* for unlimited number of responses we will use buffer with
1450 * 255 entries
1451 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1453
1454 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1455 * copy it to the user space.
1456 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001457 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001458 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 err = -ENOMEM;
1460 goto done;
1461 }
1462
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001463 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001465 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466
1467 BT_DBG("num_rsp %d", ir.num_rsp);
1468
1469 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1470 ptr += sizeof(ir);
1471 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001472 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001474 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 err = -EFAULT;
1476
1477 kfree(buf);
1478
1479done:
1480 hci_dev_put(hdev);
1481 return err;
1482}
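
/* Illustrative sketch (not part of this file): how a user-space caller
 * might reach the HCIINQUIRY handler above. The socket setup and the
 * names (dd, req) are hypothetical; the request layout mirrors
 * struct hci_inquiry_req with the response buffer appended, and
 * ir.length is multiplied by 2000 ms for the timeout above.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req;
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.ir.dev_id  = 0;			// hci0
 *	req.ir.flags   = IREQ_CACHE_FLUSH;	// discard cached entries
 *	req.ir.length  = 8;			// inquiry length units
 *	req.ir.num_rsp = 8;			// sized to info[] above
 *	req.ir.lap[0]  = 0x33;			// general inquiry access
 *	req.ir.lap[1]  = 0x8b;			// code (GIAC)
 *	req.ir.lap[2]  = 0x9e;
 *
 *	if (ioctl(dd, HCIINQUIRY, &req) == 0)
 *		printf("%u responses\n", req.ir.num_rsp);
 */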

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
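
/* Illustrative sketch (not part of this file): from user space the
 * function above is reached through the HCIDEVUP ioctl on a raw HCI
 * socket; dd is hypothetical. EALREADY simply means the controller
 * was already up.
 *
 *	if (ioctl(dd, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("failed to bring up hci0");
 */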

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
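
/* Illustrative sketch (not part of this file): HCISETACLMTU and
 * HCISETSCOMTU pack two 16-bit values into the 32-bit dev_opt field,
 * matching the pointer arithmetic above -- on a little-endian machine
 * the packet count sits in the low half and the MTU in the high half.
 * The values and the dd name are hypothetical.
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = (1021 << 16) | 8;	// acl_mtu = 1021, acl_pkts = 8
 *
 *	if (ioctl(dd, HCISETACLMTU, &dr) < 0)
 *		perror("HCISETACLMTU");
 */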

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
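
/* Illustrative sketch (not part of this file): HCIGETDEVLIST takes a
 * counted, variable-length buffer -- dev_num in, an array of
 * struct hci_dev_req out. The req and dd names are hypothetical.
 *
 *	struct {
 *		struct hci_dev_list_req dl;
 *		struct hci_dev_req dr[16];
 *	} req;
 *
 *	req.dl.dev_num = 16;
 *
 *	if (ioctl(dd, HCIGETDEVLIST, &req) == 0) {
 *		int i;
 *
 *		for (i = 0; i < req.dl.dev_num; i++)
 *			printf("hci%u flags 0x%x\n", req.dr[i].dev_id,
 *			       req.dr[i].dev_opt);
 *	}
 */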

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
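
/* Reading aid for the decision above (a summary, not new policy):
 *
 *	key type / bonding state			persistent?
 *	-----------------------------------------------------------
 *	legacy key (type < 0x03)			yes
 *	debug combination key				no
 *	changed combination, no previous key		no
 *	security mode 3 (no connection)			yes
 *	both sides requested bonding			yes
 *	local or remote dedicated bonding		yes
 *	anything else					no
 */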

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
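
/* Illustrative sketch (not part of this file): the two callbacks above
 * follow the asynchronous hci_request pattern -- build a request, queue
 * its commands, run it, and receive the HCI status in a completion
 * handler. A minimal user of the same API could look like this;
 * my_complete()/my_submit() and the chosen command are hypothetical.
 *
 *	static void my_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		if (status)
 *			BT_ERR("request failed: status %d", status);
 *	}
 *
 *	static int my_submit(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *		return hci_req_run(&req, my_complete);
 *	}
 */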

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
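
/* Illustrative sketch (not part of this file): the expected driver
 * lifecycle around the two helpers above. The my_* callbacks are
 * hypothetical; open and close are the only callbacks that
 * hci_register_dev() below insists on.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 *
 *	// ... device lifetime ...
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */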
2599
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600/* Register HCI device */
2601int hci_register_dev(struct hci_dev *hdev)
2602{
David Herrmannb1b813d2012-04-22 14:39:58 +02002603 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604
David Herrmann010666a2012-01-07 15:47:07 +01002605 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 return -EINVAL;
2607
Mat Martineau08add512011-11-02 16:18:36 -07002608 /* Do not allow HCI_AMP devices to register at index 0,
2609 * so the index can be used as the AMP controller ID.
2610 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002611 switch (hdev->dev_type) {
2612 case HCI_BREDR:
2613 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2614 break;
2615 case HCI_AMP:
2616 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2617 break;
2618 default:
2619 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002621
Sasha Levin3df92b32012-05-27 22:36:56 +02002622 if (id < 0)
2623 return id;
2624
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 sprintf(hdev->name, "hci%d", id);
2626 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002627
2628 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2629
Kees Cookd8537542013-07-03 15:04:57 -07002630 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2631 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002632 if (!hdev->workqueue) {
2633 error = -ENOMEM;
2634 goto err;
2635 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002636
Kees Cookd8537542013-07-03 15:04:57 -07002637 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2638 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002639 if (!hdev->req_workqueue) {
2640 destroy_workqueue(hdev->workqueue);
2641 error = -ENOMEM;
2642 goto err;
2643 }
2644
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002645 if (!IS_ERR_OR_NULL(bt_debugfs))
2646 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2647
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002648 dev_set_name(&hdev->dev, "%s", hdev->name);
2649
2650 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02002651 if (error < 0)
2652 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002654 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002655 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2656 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002657 if (hdev->rfkill) {
2658 if (rfkill_register(hdev->rfkill) < 0) {
2659 rfkill_destroy(hdev->rfkill);
2660 hdev->rfkill = NULL;
2661 }
2662 }
2663
Johan Hedberg5e130362013-09-13 08:58:17 +03002664 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2665 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2666
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002667 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002668 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002669
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002670 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002671 /* Assume BR/EDR support until proven otherwise (such as
2672 * through reading supported features during init.
2673 */
2674 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2675 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002676
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002677 write_lock(&hci_dev_list_lock);
2678 list_add(&hdev->list, &hci_dev_list);
2679 write_unlock(&hci_dev_list_lock);
2680
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002682 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683
Johan Hedberg19202572013-01-14 22:33:51 +02002684 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002685
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002687
David Herrmann33ca9542011-10-08 14:58:49 +02002688err_wqueue:
2689 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002690 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002691err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002692 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002693
David Herrmann33ca9542011-10-08 14:58:49 +02002694 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695}
2696EXPORT_SYMBOL(hci_register_dev);
2697
2698/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002699void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700{
Sasha Levin3df92b32012-05-27 22:36:56 +02002701 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002702
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002703 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704
Johan Hovold94324962012-03-15 14:48:41 +01002705 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2706
Sasha Levin3df92b32012-05-27 22:36:56 +02002707 id = hdev->id;
2708
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002709 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002711 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712
2713 hci_dev_do_close(hdev);
2714
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302715 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002716 kfree_skb(hdev->reassembly[i]);
2717
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002718 cancel_work_sync(&hdev->power_on);
2719
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002720 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002721 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002722 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002723 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002724 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002725 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002726
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002727 /* mgmt_index_removed should take care of emptying the
2728 * pending list */
2729 BUG_ON(!list_empty(&hdev->mgmt_pending));
2730
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 hci_notify(hdev, HCI_DEV_UNREG);
2732
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002733 if (hdev->rfkill) {
2734 rfkill_unregister(hdev->rfkill);
2735 rfkill_destroy(hdev->rfkill);
2736 }
2737
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002738 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08002739
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002740 debugfs_remove_recursive(hdev->debugfs);
2741
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002742 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002743 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002744
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002745 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002746 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002747 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002748 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002749 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002750 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002751 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002752
David Herrmanndc946bd2012-01-07 15:47:24 +01002753 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002754
2755 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756}
2757EXPORT_SYMBOL(hci_unregister_dev);
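
/* Example (illustrative sketch, not part of the core file): a driver's
 * remove path pairs hci_unregister_dev() with hci_free_dev(); struct
 * my_dev and its drvdata layout are hypothetical.
 */
static void my_driver_remove(struct my_dev *dev)
{
	struct hci_dev *hdev = dev->hdev;

	hci_unregister_dev(hdev);	/* detach from the core, flush queues */
	hci_free_dev(hdev);		/* release the hci_dev from hci_alloc_dev() */
}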
2758
2759/* Suspend HCI device */
2760int hci_suspend_dev(struct hci_dev *hdev)
2761{
2762 hci_notify(hdev, HCI_DEV_SUSPEND);
2763 return 0;
2764}
2765EXPORT_SYMBOL(hci_suspend_dev);
2766
2767/* Resume HCI device */
2768int hci_resume_dev(struct hci_dev *hdev)
2769{
2770 hci_notify(hdev, HCI_DEV_RESUME);
2771 return 0;
2772}
2773EXPORT_SYMBOL(hci_resume_dev);
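
/* Example (hedged sketch): transport drivers typically forward their own
 * power-management callbacks to the two helpers above; my_suspend(),
 * my_resume() and struct my_dev are hypothetical.
 */
static int my_suspend(struct my_dev *dev)
{
	return hci_suspend_dev(dev->hdev);
}

static int my_resume(struct my_dev *dev)
{
	return hci_resume_dev(dev->hdev);
}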
2774
Marcel Holtmann76bca882009-11-18 00:40:39 +01002775/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002776int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002777{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002778 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002779 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002780 kfree_skb(skb);
2781 return -ENXIO;
2782 }
2783
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002784 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002785 bt_cb(skb)->incoming = 1;
2786
2787 /* Time stamp */
2788 __net_timestamp(skb);
2789
Marcel Holtmann76bca882009-11-18 00:40:39 +01002790 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002791 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002792
Marcel Holtmann76bca882009-11-18 00:40:39 +01002793 return 0;
2794}
2795EXPORT_SYMBOL(hci_recv_frame);
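
/* Example (sketch): how a driver hands a complete HCI event packet to the
 * core. The source of buf/len is hypothetical; the skb handling uses only
 * helpers already seen in this file.
 */
static int my_driver_rx_event(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	/* hci_recv_frame() always consumes the skb, even on error */
	return hci_recv_frame(hdev, skb);
}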
2796
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302797static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002798 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302799{
2800 int len = 0;
2801 int hlen = 0;
2802 int remain = count;
2803 struct sk_buff *skb;
2804 struct bt_skb_cb *scb;
2805
2806 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002807 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302808 return -EILSEQ;
2809
2810 skb = hdev->reassembly[index];
2811
2812 if (!skb) {
2813 switch (type) {
2814 case HCI_ACLDATA_PKT:
2815 len = HCI_MAX_FRAME_SIZE;
2816 hlen = HCI_ACL_HDR_SIZE;
2817 break;
2818 case HCI_EVENT_PKT:
2819 len = HCI_MAX_EVENT_SIZE;
2820 hlen = HCI_EVENT_HDR_SIZE;
2821 break;
2822 case HCI_SCODATA_PKT:
2823 len = HCI_MAX_SCO_SIZE;
2824 hlen = HCI_SCO_HDR_SIZE;
2825 break;
2826 }
2827
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002828 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302829 if (!skb)
2830 return -ENOMEM;
2831
2832 scb = (void *) skb->cb;
2833 scb->expect = hlen;
2834 scb->pkt_type = type;
2835
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302836 hdev->reassembly[index] = skb;
2837 }
2838
2839 while (count) {
2840 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002841 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302842
2843 memcpy(skb_put(skb, len), data, len);
2844
2845 count -= len;
2846 data += len;
2847 scb->expect -= len;
2848 remain = count;
2849
2850 switch (type) {
2851 case HCI_EVENT_PKT:
2852 if (skb->len == HCI_EVENT_HDR_SIZE) {
2853 struct hci_event_hdr *h = hci_event_hdr(skb);
2854 scb->expect = h->plen;
2855
2856 if (skb_tailroom(skb) < scb->expect) {
2857 kfree_skb(skb);
2858 hdev->reassembly[index] = NULL;
2859 return -ENOMEM;
2860 }
2861 }
2862 break;
2863
2864 case HCI_ACLDATA_PKT:
2865 if (skb->len == HCI_ACL_HDR_SIZE) {
2866 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2867 scb->expect = __le16_to_cpu(h->dlen);
2868
2869 if (skb_tailroom(skb) < scb->expect) {
2870 kfree_skb(skb);
2871 hdev->reassembly[index] = NULL;
2872 return -ENOMEM;
2873 }
2874 }
2875 break;
2876
2877 case HCI_SCODATA_PKT:
2878 if (skb->len == HCI_SCO_HDR_SIZE) {
2879 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2880 scb->expect = h->dlen;
2881
2882 if (skb_tailroom(skb) < scb->expect) {
2883 kfree_skb(skb);
2884 hdev->reassembly[index] = NULL;
2885 return -ENOMEM;
2886 }
2887 }
2888 break;
2889 }
2890
2891 if (scb->expect == 0) {
2892 /* Complete frame */
2893
2894 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002895 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302896
2897 hdev->reassembly[index] = NULL;
2898 return remain;
2899 }
2900 }
2901
2902 return remain;
2903}
2904
Marcel Holtmannef222012007-07-11 06:42:04 +02002905int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2906{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302907 int rem = 0;
2908
Marcel Holtmannef222012007-07-11 06:42:04 +02002909 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2910 return -EILSEQ;
2911
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002912 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002913 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302914 if (rem < 0)
2915 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002916
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302917 data += (count - rem);
2918 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002919 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002920
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302921 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002922}
2923EXPORT_SYMBOL(hci_recv_fragment);
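
/* Example (sketch): a transport that receives typed HCI packets in
 * arbitrary-sized chunks can feed each chunk to hci_recv_fragment() as it
 * arrives and let the reassembly buffers above handle the framing. The
 * chunk source is hypothetical.
 */
static void my_rx_chunk(struct hci_dev *hdev, u8 type, void *data, int len)
{
	int err = hci_recv_fragment(hdev, type, data, len);

	if (err < 0)
		BT_ERR("%s reassembly failed (%d)", hdev->name, err);
}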
2924
Suraj Sumangala99811512010-07-14 13:02:19 +05302925#define STREAM_REASSEMBLY 0
2926
2927int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2928{
2929 int type;
2930 int rem = 0;
2931
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002932 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302933 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2934
2935 if (!skb) {
2936 struct { char type; } *pkt;
2937
2938 /* Start of the frame */
2939 pkt = data;
2940 type = pkt->type;
2941
2942 data++;
2943 count--;
2944 } else
2945 type = bt_cb(skb)->pkt_type;
2946
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002947 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002948 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302949 if (rem < 0)
2950 return rem;
2951
2952 data += (count - rem);
2953 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002954 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302955
2956 return rem;
2957}
2958EXPORT_SYMBOL(hci_recv_stream_fragment);
2959
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960/* ---- Interface to upper protocols ---- */
2961
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962int hci_register_cb(struct hci_cb *cb)
2963{
2964 BT_DBG("%p name %s", cb, cb->name);
2965
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002966 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002968 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969
2970 return 0;
2971}
2972EXPORT_SYMBOL(hci_register_cb);
2973
2974int hci_unregister_cb(struct hci_cb *cb)
2975{
2976 BT_DBG("%p name %s", cb, cb->name);
2977
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002978 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002980 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981
2982 return 0;
2983}
2984EXPORT_SYMBOL(hci_unregister_cb);
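
/* Example (sketch): an upper protocol registers for connection events with
 * a statically allocated struct hci_cb, filling in only the callbacks it
 * cares about. my_security_cfm() and the "my_proto" name are hypothetical.
 */
static void my_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status, encrypt);
}

static struct hci_cb my_cb = {
	.name		= "my_proto",
	.security_cfm	= my_security_cfm,
};

/* hci_register_cb(&my_cb) on module init, hci_unregister_cb(&my_cb) on exit */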
2985
Marcel Holtmann51086992013-10-10 14:54:19 -07002986static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002988 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002990 /* Time stamp */
2991 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002993 /* Send copy to monitor */
2994 hci_send_to_monitor(hdev, skb);
2995
2996 if (atomic_read(&hdev->promisc)) {
2997 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002998 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 }
3000
3001	/* Get rid of skb owner prior to sending to the driver. */
3002 skb_orphan(skb);
3003
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003004 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003005 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006}
3007
Johan Hedberg3119ae92013-03-05 20:37:44 +02003008void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3009{
3010 skb_queue_head_init(&req->cmd_q);
3011 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003012 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003013}
3014
3015int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3016{
3017 struct hci_dev *hdev = req->hdev;
3018 struct sk_buff *skb;
3019 unsigned long flags;
3020
3021 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3022
Andre Guedes5d73e032013-03-08 11:20:16 -03003023	/* If an error occurred during request building, remove all HCI
3024 * commands queued on the HCI request queue.
3025 */
3026 if (req->err) {
3027 skb_queue_purge(&req->cmd_q);
3028 return req->err;
3029 }
3030
Johan Hedberg3119ae92013-03-05 20:37:44 +02003031 /* Do not allow empty requests */
3032 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003033 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003034
3035 skb = skb_peek_tail(&req->cmd_q);
3036 bt_cb(skb)->req.complete = complete;
3037
3038 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3039 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3040 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3041
3042 queue_work(hdev->workqueue, &hdev->cmd_work);
3043
3044 return 0;
3045}
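
/* Example (sketch): building and running a request. Commands added with
 * hci_req_add() (defined below) are sent back-to-back and the complete
 * callback fires once for the whole batch. my_req_complete() and
 * my_reset_and_read_version() are hypothetical.
 */
static void my_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int my_reset_and_read_version(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	return hci_req_run(&req, my_req_complete);
}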
3046
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003047static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003048 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049{
3050 int len = HCI_COMMAND_HDR_SIZE + plen;
3051 struct hci_command_hdr *hdr;
3052 struct sk_buff *skb;
3053
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003055 if (!skb)
3056 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057
3058 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003059 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060 hdr->plen = plen;
3061
3062 if (plen)
3063 memcpy(skb_put(skb, plen), param, plen);
3064
3065 BT_DBG("skb len %d", skb->len);
3066
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003067 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003068
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003069 return skb;
3070}
3071
3072/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003073int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3074 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003075{
3076 struct sk_buff *skb;
3077
3078 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3079
3080 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3081 if (!skb) {
3082 BT_ERR("%s no memory for command", hdev->name);
3083 return -ENOMEM;
3084 }
3085
Johan Hedberg11714b32013-03-05 20:37:47 +02003086	/* Stand-alone HCI commands must be flagged as
3087 * single-command requests.
3088 */
3089 bt_cb(skb)->req.start = true;
3090
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003092 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093
3094 return 0;
3095}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096
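/* Example (sketch): sending a single stand-alone command. The parameter is
 * built on the stack; hci_prepare_cmd() copies it into the skb, so it need
 * not outlive the call. my_write_scan_enable() is hypothetical.
 */
static int my_write_scan_enable(struct hci_dev *hdev, __u8 scan)
{
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
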
Johan Hedberg71c76a12013-03-05 20:37:46 +02003097/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003098void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3099 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003100{
3101 struct hci_dev *hdev = req->hdev;
3102 struct sk_buff *skb;
3103
3104 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3105
Andre Guedes34739c12013-03-08 11:20:18 -03003106	/* If an error occurred during request building, there is no point in
3107 * queueing the HCI command. We can simply return.
3108 */
3109 if (req->err)
3110 return;
3111
Johan Hedberg71c76a12013-03-05 20:37:46 +02003112 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3113 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003114 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3115 hdev->name, opcode);
3116 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003117 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003118 }
3119
3120 if (skb_queue_empty(&req->cmd_q))
3121 bt_cb(skb)->req.start = true;
3122
Johan Hedberg02350a72013-04-03 21:50:29 +03003123 bt_cb(skb)->req.event = event;
3124
Johan Hedberg71c76a12013-03-05 20:37:46 +02003125 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003126}
3127
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003128void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3129 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003130{
3131 hci_req_add_ev(req, opcode, plen, param, 0);
3132}
3133
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003135void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003136{
3137 struct hci_command_hdr *hdr;
3138
3139 if (!hdev->sent_cmd)
3140 return NULL;
3141
3142 hdr = (void *) hdev->sent_cmd->data;
3143
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003144 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145 return NULL;
3146
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003147 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148
3149 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3150}
3151
3152/* Send ACL data */
3153static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3154{
3155 struct hci_acl_hdr *hdr;
3156 int len = skb->len;
3157
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003158 skb_push(skb, HCI_ACL_HDR_SIZE);
3159 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003160 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003161 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3162 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163}
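
/* Worked example: hci_handle_pack() folds the 12-bit connection handle and
 * the 4-bit packet boundary/broadcast flags into one 16-bit field. For
 * handle 0x002a with flags ACL_START (0x02):
 *
 *	hci_handle_pack(0x002a, 0x02) == (0x002a & 0x0fff) | (0x02 << 12)
 *				      == 0x202a
 *
 * hci_handle() and hci_flags() on the receive side split the field back
 * apart (see hci_acldata_packet() below).
 */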
3164
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003165static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003166 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003168 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169 struct hci_dev *hdev = conn->hdev;
3170 struct sk_buff *list;
3171
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003172 skb->len = skb_headlen(skb);
3173 skb->data_len = 0;
3174
3175 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003176
3177 switch (hdev->dev_type) {
3178 case HCI_BREDR:
3179 hci_add_acl_hdr(skb, conn->handle, flags);
3180 break;
3181 case HCI_AMP:
3182 hci_add_acl_hdr(skb, chan->handle, flags);
3183 break;
3184 default:
3185 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3186 return;
3187 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003188
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003189 list = skb_shinfo(skb)->frag_list;
3190 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191		/* Non-fragmented */
3192 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3193
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003194 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 } else {
3196 /* Fragmented */
3197 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3198
3199 skb_shinfo(skb)->frag_list = NULL;
3200
3201 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003202 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003204 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003205
3206 flags &= ~ACL_START;
3207 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208 do {
3209 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003210
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003211 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003212 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213
3214 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3215
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003216 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217 } while (list);
3218
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003219 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003221}
3222
3223void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3224{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003225 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003226
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003227 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003228
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003229 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003231 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233
3234/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003235void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236{
3237 struct hci_dev *hdev = conn->hdev;
3238 struct hci_sco_hdr hdr;
3239
3240 BT_DBG("%s len %d", hdev->name, skb->len);
3241
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003242 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 hdr.dlen = skb->len;
3244
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003245 skb_push(skb, HCI_SCO_HDR_SIZE);
3246 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003247 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003249 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003250
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003252 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254
3255/* ---- HCI TX task (outgoing data) ---- */
3256
3257/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003258static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3259 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260{
3261 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003262 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003263 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003265	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003267
3268 rcu_read_lock();
3269
3270 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003271 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003273
3274 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3275 continue;
3276
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277 num++;
3278
3279 if (c->sent < min) {
3280 min = c->sent;
3281 conn = c;
3282 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003283
3284 if (hci_conn_num(hdev, type) == num)
3285 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286 }
3287
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003288 rcu_read_unlock();
3289
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003291 int cnt, q;
3292
3293 switch (conn->type) {
3294 case ACL_LINK:
3295 cnt = hdev->acl_cnt;
3296 break;
3297 case SCO_LINK:
3298 case ESCO_LINK:
3299 cnt = hdev->sco_cnt;
3300 break;
3301 case LE_LINK:
3302 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3303 break;
3304 default:
3305 cnt = 0;
3306 BT_ERR("Unknown link type");
3307 }
3308
3309 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310 *quote = q ? q : 1;
3311 } else
3312 *quote = 0;
3313
3314 BT_DBG("conn %p quote %d", conn, *quote);
3315 return conn;
3316}
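
/* Worked example of the quota above: with three SCO connections holding
 * queued data and hdev->sco_cnt == 6, the least recently served connection
 * gets a quote of 6 / 3 == 2 frames per pass, so one busy link cannot
 * starve the others. The division rounds down; the "q ? q : 1" fallback
 * still guarantees at least one frame per scheduling pass.
 */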
3317
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003318static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319{
3320 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003321 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322
Ville Tervobae1f5d92011-02-10 22:38:53 -03003323 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003325 rcu_read_lock();
3326
Linus Torvalds1da177e2005-04-16 15:20:36 -07003327 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003328 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003329 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003330 BT_ERR("%s killing stalled connection %pMR",
3331 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003332 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333 }
3334 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003335
3336 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337}
3338
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003339static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3340 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003341{
3342 struct hci_conn_hash *h = &hdev->conn_hash;
3343 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003344 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003345 struct hci_conn *conn;
3346 int cnt, q, conn_num = 0;
3347
3348 BT_DBG("%s", hdev->name);
3349
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003350 rcu_read_lock();
3351
3352 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003353 struct hci_chan *tmp;
3354
3355 if (conn->type != type)
3356 continue;
3357
3358 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3359 continue;
3360
3361 conn_num++;
3362
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003363 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003364 struct sk_buff *skb;
3365
3366 if (skb_queue_empty(&tmp->data_q))
3367 continue;
3368
3369 skb = skb_peek(&tmp->data_q);
3370 if (skb->priority < cur_prio)
3371 continue;
3372
3373 if (skb->priority > cur_prio) {
3374 num = 0;
3375 min = ~0;
3376 cur_prio = skb->priority;
3377 }
3378
3379 num++;
3380
3381 if (conn->sent < min) {
3382 min = conn->sent;
3383 chan = tmp;
3384 }
3385 }
3386
3387 if (hci_conn_num(hdev, type) == conn_num)
3388 break;
3389 }
3390
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003391 rcu_read_unlock();
3392
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003393 if (!chan)
3394 return NULL;
3395
3396 switch (chan->conn->type) {
3397 case ACL_LINK:
3398 cnt = hdev->acl_cnt;
3399 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003400 case AMP_LINK:
3401 cnt = hdev->block_cnt;
3402 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003403 case SCO_LINK:
3404 case ESCO_LINK:
3405 cnt = hdev->sco_cnt;
3406 break;
3407 case LE_LINK:
3408 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3409 break;
3410 default:
3411 cnt = 0;
3412 BT_ERR("Unknown link type");
3413 }
3414
3415 q = cnt / num;
3416 *quote = q ? q : 1;
3417 BT_DBG("chan %p quote %d", chan, *quote);
3418 return chan;
3419}
3420
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003421static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3422{
3423 struct hci_conn_hash *h = &hdev->conn_hash;
3424 struct hci_conn *conn;
3425 int num = 0;
3426
3427 BT_DBG("%s", hdev->name);
3428
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003429 rcu_read_lock();
3430
3431 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003432 struct hci_chan *chan;
3433
3434 if (conn->type != type)
3435 continue;
3436
3437 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3438 continue;
3439
3440 num++;
3441
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003442 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003443 struct sk_buff *skb;
3444
3445 if (chan->sent) {
3446 chan->sent = 0;
3447 continue;
3448 }
3449
3450 if (skb_queue_empty(&chan->data_q))
3451 continue;
3452
3453 skb = skb_peek(&chan->data_q);
3454 if (skb->priority >= HCI_PRIO_MAX - 1)
3455 continue;
3456
3457 skb->priority = HCI_PRIO_MAX - 1;
3458
3459 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003460 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003461 }
3462
3463 if (hci_conn_num(hdev, type) == num)
3464 break;
3465 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003466
3467 rcu_read_unlock();
3468
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003469}
3470
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003471static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3472{
3473 /* Calculate count of blocks used by this packet */
3474 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3475}
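
/* Worked example: assuming hdev->block_len == 339 (an illustrative value
 * only), an skb of 1025 bytes (4-byte ACL header plus 1021 bytes of data)
 * costs
 *
 *	DIV_ROUND_UP(1025 - HCI_ACL_HDR_SIZE, 339)
 *		== DIV_ROUND_UP(1021, 339) == 4
 *
 * blocks against hdev->block_cnt in hci_sched_acl_blk() below.
 */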
3476
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003477static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479 if (!test_bit(HCI_RAW, &hdev->flags)) {
3480		/* ACL tx timeout must be longer than the maximum
3481 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003482 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003483 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003484 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003486}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003488static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003489{
3490 unsigned int cnt = hdev->acl_cnt;
3491 struct hci_chan *chan;
3492 struct sk_buff *skb;
3493 int quote;
3494
3495 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003496
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003497 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003498 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003499 u32 priority = (skb_peek(&chan->data_q))->priority;
3500 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003501 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003502 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003503
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003504 /* Stop if priority has changed */
3505 if (skb->priority < priority)
3506 break;
3507
3508 skb = skb_dequeue(&chan->data_q);
3509
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003510 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003511 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003512
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003513 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514 hdev->acl_last_tx = jiffies;
3515
3516 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003517 chan->sent++;
3518 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 }
3520 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003521
3522 if (cnt != hdev->acl_cnt)
3523 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524}
3525
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003526static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003527{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003528 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003529 struct hci_chan *chan;
3530 struct sk_buff *skb;
3531 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003532 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003533
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003534 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003535
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003536 BT_DBG("%s", hdev->name);
3537
3538 if (hdev->dev_type == HCI_AMP)
3539 type = AMP_LINK;
3540 else
3541 type = ACL_LINK;
3542
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003543 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003544 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003545 u32 priority = (skb_peek(&chan->data_q))->priority;
3546 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3547 int blocks;
3548
3549 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003550 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003551
3552 /* Stop if priority has changed */
3553 if (skb->priority < priority)
3554 break;
3555
3556 skb = skb_dequeue(&chan->data_q);
3557
3558 blocks = __get_blocks(hdev, skb);
3559 if (blocks > hdev->block_cnt)
3560 return;
3561
3562 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003563 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003564
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003565 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003566 hdev->acl_last_tx = jiffies;
3567
3568 hdev->block_cnt -= blocks;
3569 quote -= blocks;
3570
3571 chan->sent += blocks;
3572 chan->conn->sent += blocks;
3573 }
3574 }
3575
3576 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003577 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003578}
3579
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003580static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003581{
3582 BT_DBG("%s", hdev->name);
3583
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003584 /* No ACL link over BR/EDR controller */
3585 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3586 return;
3587
3588 /* No AMP link over AMP controller */
3589 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003590 return;
3591
3592 switch (hdev->flow_ctl_mode) {
3593 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3594 hci_sched_acl_pkt(hdev);
3595 break;
3596
3597 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3598 hci_sched_acl_blk(hdev);
3599 break;
3600 }
3601}
3602
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003604static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605{
3606 struct hci_conn *conn;
3607 struct sk_buff *skb;
3608 int quote;
3609
3610 BT_DBG("%s", hdev->name);
3611
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003612 if (!hci_conn_num(hdev, SCO_LINK))
3613 return;
3614
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3616 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3617 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003618 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619
3620 conn->sent++;
3621 if (conn->sent == ~0)
3622 conn->sent = 0;
3623 }
3624 }
3625}
3626
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003627static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003628{
3629 struct hci_conn *conn;
3630 struct sk_buff *skb;
3631 int quote;
3632
3633 BT_DBG("%s", hdev->name);
3634
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003635 if (!hci_conn_num(hdev, ESCO_LINK))
3636 return;
3637
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003638 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3639 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003640 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3641 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003642 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003643
3644 conn->sent++;
3645 if (conn->sent == ~0)
3646 conn->sent = 0;
3647 }
3648 }
3649}
3650
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003651static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003652{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003653 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003654 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003655 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003656
3657 BT_DBG("%s", hdev->name);
3658
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003659 if (!hci_conn_num(hdev, LE_LINK))
3660 return;
3661
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003662 if (!test_bit(HCI_RAW, &hdev->flags)) {
3663		/* LE tx timeout must be longer than the maximum
3664 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003665 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003666 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003667 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003668 }
3669
3670 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003671 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003672 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003673 u32 priority = (skb_peek(&chan->data_q))->priority;
3674 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003675 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003676 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003677
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003678 /* Stop if priority has changed */
3679 if (skb->priority < priority)
3680 break;
3681
3682 skb = skb_dequeue(&chan->data_q);
3683
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003684 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003685 hdev->le_last_tx = jiffies;
3686
3687 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003688 chan->sent++;
3689 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003690 }
3691 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003692
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003693 if (hdev->le_pkts)
3694 hdev->le_cnt = cnt;
3695 else
3696 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003697
3698 if (cnt != tmp)
3699 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003700}
3701
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003702static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003704 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 struct sk_buff *skb;
3706
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003707 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003708 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709
Marcel Holtmann52de5992013-09-03 18:08:38 -07003710 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3711 /* Schedule queues and send stuff to HCI driver */
3712 hci_sched_acl(hdev);
3713 hci_sched_sco(hdev);
3714 hci_sched_esco(hdev);
3715 hci_sched_le(hdev);
3716 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003717
Linus Torvalds1da177e2005-04-16 15:20:36 -07003718 /* Send next queued raw (unknown type) packet */
3719 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003720 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003721}
3722
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003723/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724
3725/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003726static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727{
3728 struct hci_acl_hdr *hdr = (void *) skb->data;
3729 struct hci_conn *conn;
3730 __u16 handle, flags;
3731
3732 skb_pull(skb, HCI_ACL_HDR_SIZE);
3733
3734 handle = __le16_to_cpu(hdr->handle);
3735 flags = hci_flags(handle);
3736 handle = hci_handle(handle);
3737
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003738 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003739 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740
3741 hdev->stat.acl_rx++;
3742
3743 hci_dev_lock(hdev);
3744 conn = hci_conn_hash_lookup_handle(hdev, handle);
3745 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003746
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003748 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003749
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003751 l2cap_recv_acldata(conn, skb, flags);
3752 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003754 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003755 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003756 }
3757
3758 kfree_skb(skb);
3759}
3760
3761/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003762static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763{
3764 struct hci_sco_hdr *hdr = (void *) skb->data;
3765 struct hci_conn *conn;
3766 __u16 handle;
3767
3768 skb_pull(skb, HCI_SCO_HDR_SIZE);
3769
3770 handle = __le16_to_cpu(hdr->handle);
3771
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003772 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773
3774 hdev->stat.sco_rx++;
3775
3776 hci_dev_lock(hdev);
3777 conn = hci_conn_hash_lookup_handle(hdev, handle);
3778 hci_dev_unlock(hdev);
3779
3780 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003782 sco_recv_scodata(conn, skb);
3783 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003785 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003786 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787 }
3788
3789 kfree_skb(skb);
3790}
3791
Johan Hedberg9238f362013-03-05 20:37:48 +02003792static bool hci_req_is_complete(struct hci_dev *hdev)
3793{
3794 struct sk_buff *skb;
3795
3796 skb = skb_peek(&hdev->cmd_q);
3797 if (!skb)
3798 return true;
3799
3800 return bt_cb(skb)->req.start;
3801}
3802
Johan Hedberg42c6b122013-03-05 20:37:49 +02003803static void hci_resend_last(struct hci_dev *hdev)
3804{
3805 struct hci_command_hdr *sent;
3806 struct sk_buff *skb;
3807 u16 opcode;
3808
3809 if (!hdev->sent_cmd)
3810 return;
3811
3812 sent = (void *) hdev->sent_cmd->data;
3813 opcode = __le16_to_cpu(sent->opcode);
3814 if (opcode == HCI_OP_RESET)
3815 return;
3816
3817 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3818 if (!skb)
3819 return;
3820
3821 skb_queue_head(&hdev->cmd_q, skb);
3822 queue_work(hdev->workqueue, &hdev->cmd_work);
3823}
3824
Johan Hedberg9238f362013-03-05 20:37:48 +02003825void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3826{
3827 hci_req_complete_t req_complete = NULL;
3828 struct sk_buff *skb;
3829 unsigned long flags;
3830
3831 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3832
Johan Hedberg42c6b122013-03-05 20:37:49 +02003833 /* If the completed command doesn't match the last one that was
3834	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003835 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003836 if (!hci_sent_cmd_data(hdev, opcode)) {
3837 /* Some CSR based controllers generate a spontaneous
3838 * reset complete event during init and any pending
3839 * command will never be completed. In such a case we
3840 * need to resend whatever was the last sent
3841 * command.
3842 */
3843 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3844 hci_resend_last(hdev);
3845
Johan Hedberg9238f362013-03-05 20:37:48 +02003846 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003847 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003848
3849	/* If the command succeeded and there are still more commands in
3850	 * this request, the request is not yet complete.
3851 */
3852 if (!status && !hci_req_is_complete(hdev))
3853 return;
3854
3855	/* If this was the last command in a request, the complete
3856 * callback would be found in hdev->sent_cmd instead of the
3857 * command queue (hdev->cmd_q).
3858 */
3859 if (hdev->sent_cmd) {
3860 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003861
3862 if (req_complete) {
3863 /* We must set the complete callback to NULL to
3864 * avoid calling the callback more than once if
3865 * this function gets called again.
3866 */
3867 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3868
Johan Hedberg9238f362013-03-05 20:37:48 +02003869 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003870 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003871 }
3872
3873 /* Remove all pending commands belonging to this request */
3874 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3875 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3876 if (bt_cb(skb)->req.start) {
3877 __skb_queue_head(&hdev->cmd_q, skb);
3878 break;
3879 }
3880
3881 req_complete = bt_cb(skb)->req.complete;
3882 kfree_skb(skb);
3883 }
3884 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3885
3886call_complete:
3887 if (req_complete)
3888 req_complete(hdev, status);
3889}
3890
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003891static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003893 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894 struct sk_buff *skb;
3895
3896 BT_DBG("%s", hdev->name);
3897
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003899 /* Send copy to monitor */
3900 hci_send_to_monitor(hdev, skb);
3901
Linus Torvalds1da177e2005-04-16 15:20:36 -07003902 if (atomic_read(&hdev->promisc)) {
3903 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003904 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905 }
3906
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003907 if (test_bit(HCI_RAW, &hdev->flags) ||
3908 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003909 kfree_skb(skb);
3910 continue;
3911 }
3912
3913 if (test_bit(HCI_INIT, &hdev->flags)) {
3914			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003915 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916 case HCI_ACLDATA_PKT:
3917 case HCI_SCODATA_PKT:
3918 kfree_skb(skb);
3919 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003920 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921 }
3922
3923 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003924 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003926 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927 hci_event_packet(hdev, skb);
3928 break;
3929
3930 case HCI_ACLDATA_PKT:
3931 BT_DBG("%s ACL data packet", hdev->name);
3932 hci_acldata_packet(hdev, skb);
3933 break;
3934
3935 case HCI_SCODATA_PKT:
3936 BT_DBG("%s SCO data packet", hdev->name);
3937 hci_scodata_packet(hdev, skb);
3938 break;
3939
3940 default:
3941 kfree_skb(skb);
3942 break;
3943 }
3944 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945}
3946
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003947static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003949 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950 struct sk_buff *skb;
3951
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003952 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3953 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003956 if (atomic_read(&hdev->cmd_cnt)) {
3957 skb = skb_dequeue(&hdev->cmd_q);
3958 if (!skb)
3959 return;
3960
Wei Yongjun7585b972009-02-25 18:29:52 +08003961 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003963 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003964 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003965 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003966 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003967 if (test_bit(HCI_RESET, &hdev->flags))
3968 del_timer(&hdev->cmd_timer);
3969 else
3970 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003971 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003972 } else {
3973 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003974 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975 }
3976 }
3977}