/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
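/* hci_index_ida hands out the numeric part of each hciN name; an index
 * goes back into the pool when its controller is unregistered, so ids
 * can be reused.
 */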

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

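/* The read-only debugfs entries below all follow the same pattern as
 * blacklist_fops above: a seq_file show handler wrapped by single_open(),
 * registered against the per-device debugfs directory in __hci_init().
 */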
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

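		/* The 128-bit UUID bytes are stored in reverse (little
		 * endian) order, so the leftmost field of the textual
		 * UUID (data0) comes from the end of the byte array.
		 * Read from the uuid[] member explicitly; offsetting the
		 * bt_uuid pointer itself would land in the list_head.
		 */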
		data5 = get_unaligned_le32(&uuid->uuid[0]);
		data4 = get_unaligned_le16(&uuid->uuid[4]);
		data3 = get_unaligned_le16(&uuid->uuid[6]);
		data2 = get_unaligned_le16(&uuid->uuid[8]);
		data1 = get_unaligned_le16(&uuid->uuid[10]);
		data0 = get_unaligned_le32(&uuid->uuid[12]);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

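/* With a NULL set handler the attribute is read-only; the value is the
 * controller's 16-bit Voice Setting, formatted as four hex digits.
 */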
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
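/* Example usage (hypothetical driver code, not part of this file):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * 0xfc01 stands in for some vendor-specific opcode. On success the
 * returned skb holds the Command Complete (or, with __hci_cmd_sync_ev,
 * the requested event) parameters and must be freed by the caller with
 * kfree_skb().
 */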

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

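/* Pick the richest inquiry result format the controller can deliver:
 * 0x02 = Inquiry Result with Extended Inquiry Response, 0x01 = Inquiry
 * Result with RSSI, 0x00 = standard Inquiry Result. The manufacturer,
 * hci_rev and lmp_subver checks match a few controllers that are known
 * to support the RSSI format without advertising the feature bit.
 */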
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * an event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);

	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

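/* Track the discovery state machine (STOPPED -> STARTING -> FINDING ->
 * RESOLVING -> STOPPING -> STOPPED) and notify mgmt only on the edges
 * that matter: entering FINDING, and entering STOPPED from anything but
 * STARTING (where discovery never actually began).
 */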
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

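	/* Keep the resolve list sorted by ascending |RSSI| so that names
	 * are resolved for the strongest devices first; entries whose
	 * name resolution is already pending keep their position.
	 */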
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

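/* Add a fresh inquiry result to the discovery cache, or refresh the
 * existing entry for the same bdaddr. Returns false when the remote name
 * is still unknown (or the entry cannot be allocated), true otherwise;
 * *ssp, when given, reports whether the device indicated Simple Pairing
 * support.
 */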
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

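/* wait_on_bit() action used by hci_inquiry() below: reschedule until the
 * HCI_INQUIRY flag is cleared, or a signal is pending.
 */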
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

Marcel Holtmann04837f62006-07-03 10:02:33 +02001270 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001271
1272 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001273 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1274 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001275 if (err < 0)
1276 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001277
1278 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1279 * cleared). If it is interrupted by a signal, return -EINTR.
1280 */
1281 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1282 TASK_INTERRUPTIBLE))
1283 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001284 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001286 /* for unlimited number of responses we will use buffer with
1287 * 255 entries
1288 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1290
1291 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1292 * copy it to the user space.
1293 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001294 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001295 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 err = -ENOMEM;
1297 goto done;
1298 }
1299
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001300 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001302 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303
1304 BT_DBG("num_rsp %d", ir.num_rsp);
1305
1306 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1307 ptr += sizeof(ir);
1308 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001309 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001311 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 err = -EFAULT;
1313
1314 kfree(buf);
1315
1316done:
1317 hci_dev_put(hdev);
1318 return err;
1319}
1320
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, clean up */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

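/* Tear down a controller: flush the RX/TX work, cancel pending timers,
 * flush the inquiry cache and connection hash, optionally issue an
 * HCI_Reset (HCI_QUIRK_RESET_ON_CLOSE) and finally call the driver's
 * close() callback with all queues empty.
 */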
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

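/* Handle the HCISET* ioctls that tweak BR/EDR controller settings
 * (authentication, encryption, scan mode, link policy, packet types
 * and MTUs). These are rejected for AMP controllers, for devices
 * without BR/EDR enabled and for devices claimed by a user channel.
 */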
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
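
/* A minimal userspace sketch of how this ioctl is typically driven,
 * assuming a raw HCI socket; shown inside a comment for illustration
 * only, with error handling trimmed:
 *
 *	struct hci_dev_list_req *dl;
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (dd >= 0 && ioctl(dd, HCIGETDEVLIST, dl) == 0)
 *		printf("%u devices\n", dl->dev_num);
 */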

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

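/* Decide whether a newly created link key should be stored
 * persistently: legacy keys always are, debug keys never are, and for
 * the remaining key types the decision depends on the authentication
 * requirements both sides used during pairing.
 */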
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side used no-bonding as
	 * requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

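/* Add a device to the blacklist. BDADDR_ANY is not a valid entry and
 * duplicates are refused; the management interface is notified on
 * success.
 */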
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

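/* LE discovery teardown: when the scan timeout fires,
 * le_scan_disable_work() sends LE Set Scan Enable (disabled). Its
 * completion handler then either marks discovery as stopped (LE-only
 * discovery) or, for interleaved discovery, follows up with a classic
 * inquiry whose own completion handler stops discovery on failure.
 */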
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

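/* A minimal sketch of the expected driver flow around these helpers,
 * shown inside a comment for illustration only; my_open/my_close/
 * my_flush/my_send are hypothetical driver callbacks and error
 * handling is trimmed:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	hdev->bus = HCI_VIRTUAL;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send = my_send;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */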
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

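/* Reassemble a partially received HCI packet of the given type into
 * hdev->reassembly[index]. Returns the number of input bytes left
 * unconsumed, or a negative error; a fully reassembled packet is
 * handed to hci_recv_frame().
 */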
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302634static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002635 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302636{
2637 int len = 0;
2638 int hlen = 0;
2639 int remain = count;
2640 struct sk_buff *skb;
2641 struct bt_skb_cb *scb;
2642
2643 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002644 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302645 return -EILSEQ;
2646
2647 skb = hdev->reassembly[index];
2648
2649 if (!skb) {
2650 switch (type) {
2651 case HCI_ACLDATA_PKT:
2652 len = HCI_MAX_FRAME_SIZE;
2653 hlen = HCI_ACL_HDR_SIZE;
2654 break;
2655 case HCI_EVENT_PKT:
2656 len = HCI_MAX_EVENT_SIZE;
2657 hlen = HCI_EVENT_HDR_SIZE;
2658 break;
2659 case HCI_SCODATA_PKT:
2660 len = HCI_MAX_SCO_SIZE;
2661 hlen = HCI_SCO_HDR_SIZE;
2662 break;
2663 }
2664
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002665 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302666 if (!skb)
2667 return -ENOMEM;
2668
2669 scb = (void *) skb->cb;
2670 scb->expect = hlen;
2671 scb->pkt_type = type;
2672
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302673 hdev->reassembly[index] = skb;
2674 }
2675
2676 while (count) {
2677 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002678 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302679
2680 memcpy(skb_put(skb, len), data, len);
2681
2682 count -= len;
2683 data += len;
2684 scb->expect -= len;
2685 remain = count;
2686
2687 switch (type) {
2688 case HCI_EVENT_PKT:
2689 if (skb->len == HCI_EVENT_HDR_SIZE) {
2690 struct hci_event_hdr *h = hci_event_hdr(skb);
2691 scb->expect = h->plen;
2692
2693 if (skb_tailroom(skb) < scb->expect) {
2694 kfree_skb(skb);
2695 hdev->reassembly[index] = NULL;
2696 return -ENOMEM;
2697 }
2698 }
2699 break;
2700
2701 case HCI_ACLDATA_PKT:
2702 if (skb->len == HCI_ACL_HDR_SIZE) {
2703 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2704 scb->expect = __le16_to_cpu(h->dlen);
2705
2706 if (skb_tailroom(skb) < scb->expect) {
2707 kfree_skb(skb);
2708 hdev->reassembly[index] = NULL;
2709 return -ENOMEM;
2710 }
2711 }
2712 break;
2713
2714 case HCI_SCODATA_PKT:
2715 if (skb->len == HCI_SCO_HDR_SIZE) {
2716 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2717 scb->expect = h->dlen;
2718
2719 if (skb_tailroom(skb) < scb->expect) {
2720 kfree_skb(skb);
2721 hdev->reassembly[index] = NULL;
2722 return -ENOMEM;
2723 }
2724 }
2725 break;
2726 }
2727
2728 if (scb->expect == 0) {
2729 /* Complete frame */
2730
2731 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002732 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302733
2734 hdev->reassembly[index] = NULL;
2735 return remain;
2736 }
2737 }
2738
2739 return remain;
2740}
2741
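/* Feed driver data of a known packet type into reassembly, using one
 * reassembly slot per packet type (type - 1). Returns zero once all
 * bytes have been consumed, or a negative error.
 */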
Marcel Holtmannef222012007-07-11 06:42:04 +02002742int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2743{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302744 int rem = 0;
2745
Marcel Holtmannef222012007-07-11 06:42:04 +02002746 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2747 return -EILSEQ;
2748
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002749 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002750 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302751 if (rem < 0)
2752 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002753
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302754 data += (count - rem);
2755 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002756 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002757
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302758 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002759}
2760EXPORT_SYMBOL(hci_recv_fragment);
2761
Suraj Sumangala99811512010-07-14 13:02:19 +05302762#define STREAM_REASSEMBLY 0
2763
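/* Feed a raw byte stream into reassembly. The first byte of every
 * frame carries the packet type; all frames share the single
 * STREAM_REASSEMBLY slot.
 */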
2764int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2765{
2766 int type;
2767 int rem = 0;
2768
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002769 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302770 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2771
2772 if (!skb) {
2773 struct { char type; } *pkt;
2774
2775 /* Start of the frame */
2776 pkt = data;
2777 type = pkt->type;
2778
2779 data++;
2780 count--;
2781 } else
2782 type = bt_cb(skb)->pkt_type;
2783
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002784 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002785 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302786 if (rem < 0)
2787 return rem;
2788
2789 data += (count - rem);
2790 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002791 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302792
2793 return rem;
2794}
2795EXPORT_SYMBOL(hci_recv_stream_fragment);
2796
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797/* ---- Interface to upper protocols ---- */
2798
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799int hci_register_cb(struct hci_cb *cb)
2800{
2801 BT_DBG("%p name %s", cb, cb->name);
2802
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002803 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002805 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806
2807 return 0;
2808}
2809EXPORT_SYMBOL(hci_register_cb);
2810
2811int hci_unregister_cb(struct hci_cb *cb)
2812{
2813 BT_DBG("%p name %s", cb, cb->name);
2814
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002815 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002817 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818
2819 return 0;
2820}
2821EXPORT_SYMBOL(hci_unregister_cb);
2822
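/* Hand one frame to the driver, time-stamping it and sending copies
 * to the monitor and, in promiscuous mode, to the raw sockets first.
 */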
Marcel Holtmann51086992013-10-10 14:54:19 -07002823static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002825 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002827 /* Time stamp */
2828 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002830 /* Send copy to monitor */
2831 hci_send_to_monitor(hdev, skb);
2832
2833 if (atomic_read(&hdev->promisc)) {
2834 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002835 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 }
2837
2838 /* Get rid of skb owner, prior to sending to the driver. */
2839 skb_orphan(skb);
2840
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07002841 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07002842 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843}
2844
Johan Hedberg3119ae92013-03-05 20:37:44 +02002845void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2846{
2847 skb_queue_head_init(&req->cmd_q);
2848 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002849 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002850}
2851
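/* Splice the collected commands of a request onto hdev->cmd_q and
 * schedule the command work. The complete callback is attached to
 * the last command of the request.
 */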
2852int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2853{
2854 struct hci_dev *hdev = req->hdev;
2855 struct sk_buff *skb;
2856 unsigned long flags;
2857
2858 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2859
Andre Guedes5d73e032013-03-08 11:20:16 -03002860	/* If an error occurred during request building, remove all HCI
2861 * commands queued on the HCI request queue.
2862 */
2863 if (req->err) {
2864 skb_queue_purge(&req->cmd_q);
2865 return req->err;
2866 }
2867
Johan Hedberg3119ae92013-03-05 20:37:44 +02002868 /* Do not allow empty requests */
2869 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002870 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002871
2872 skb = skb_peek_tail(&req->cmd_q);
2873 bt_cb(skb)->req.complete = complete;
2874
2875 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2876 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2877 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2878
2879 queue_work(hdev->workqueue, &hdev->cmd_work);
2880
2881 return 0;
2882}
2883
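/* Allocate an skb carrying an HCI command header plus its parameters */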
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002884static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002885 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886{
2887 int len = HCI_COMMAND_HDR_SIZE + plen;
2888 struct hci_command_hdr *hdr;
2889 struct sk_buff *skb;
2890
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002892 if (!skb)
2893 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894
2895 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002896 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897 hdr->plen = plen;
2898
2899 if (plen)
2900 memcpy(skb_put(skb, plen), param, plen);
2901
2902 BT_DBG("skb len %d", skb->len);
2903
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002904 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002905
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002906 return skb;
2907}
2908
2909/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002910int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2911 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002912{
2913 struct sk_buff *skb;
2914
2915 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2916
2917 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2918 if (!skb) {
2919 BT_ERR("%s no memory for command", hdev->name);
2920 return -ENOMEM;
2921 }
2922
Johan Hedberg11714b32013-03-05 20:37:47 +02002923	/* Stand-alone HCI commands must be flagged as
2924 * single-command requests.
2925 */
2926 bt_cb(skb)->req.start = true;
2927
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002929 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930
2931 return 0;
2932}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933
Johan Hedberg71c76a12013-03-05 20:37:46 +02002934/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002935void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2936 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002937{
2938 struct hci_dev *hdev = req->hdev;
2939 struct sk_buff *skb;
2940
2941 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2942
Andre Guedes34739c12013-03-08 11:20:18 -03002943	/* If an error occurred during request building, there is no point in
2944 * queueing the HCI command. We can simply return.
2945 */
2946 if (req->err)
2947 return;
2948
Johan Hedberg71c76a12013-03-05 20:37:46 +02002949 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2950 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002951 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2952 hdev->name, opcode);
2953 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002954 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002955 }
2956
2957 if (skb_queue_empty(&req->cmd_q))
2958 bt_cb(skb)->req.start = true;
2959
Johan Hedberg02350a72013-04-03 21:50:29 +03002960 bt_cb(skb)->req.event = event;
2961
Johan Hedberg71c76a12013-03-05 20:37:46 +02002962 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002963}
2964
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002965void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2966 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002967{
2968 hci_req_add_ev(req, opcode, plen, param, 0);
2969}
2970
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002972void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973{
2974 struct hci_command_hdr *hdr;
2975
2976 if (!hdev->sent_cmd)
2977 return NULL;
2978
2979 hdr = (void *) hdev->sent_cmd->data;
2980
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002981 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 return NULL;
2983
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002984 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985
2986 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2987}
2988
2989/* Send ACL data */
2990static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2991{
2992 struct hci_acl_hdr *hdr;
2993 int len = skb->len;
2994
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002995 skb_push(skb, HCI_ACL_HDR_SIZE);
2996 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002997 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002998 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2999 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000}
3001
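/* Prepend ACL headers and queue an skb (and any fragments) on the
 * channel's data queue; fragments are queued atomically and flagged
 * with ACL_CONT.
 */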
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003002static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003003 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003005 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 struct hci_dev *hdev = conn->hdev;
3007 struct sk_buff *list;
3008
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003009 skb->len = skb_headlen(skb);
3010 skb->data_len = 0;
3011
3012 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003013
3014 switch (hdev->dev_type) {
3015 case HCI_BREDR:
3016 hci_add_acl_hdr(skb, conn->handle, flags);
3017 break;
3018 case HCI_AMP:
3019 hci_add_acl_hdr(skb, chan->handle, flags);
3020 break;
3021 default:
3022 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3023 return;
3024 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003025
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003026 list = skb_shinfo(skb)->frag_list;
3027 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028		/* Non-fragmented */
3029 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3030
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003031 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032 } else {
3033 /* Fragmented */
3034 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3035
3036 skb_shinfo(skb)->frag_list = NULL;
3037
3038 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003039 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003041 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003042
3043 flags &= ~ACL_START;
3044 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 do {
3046 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003047
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003048 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003049 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050
3051 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3052
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003053 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054 } while (list);
3055
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003056 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003058}
3059
3060void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3061{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003062 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003063
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003064 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003065
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003066 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003068 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003070
3071/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003072void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073{
3074 struct hci_dev *hdev = conn->hdev;
3075 struct hci_sco_hdr hdr;
3076
3077 BT_DBG("%s len %d", hdev->name, skb->len);
3078
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003079 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080 hdr.dlen = skb->len;
3081
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003082 skb_push(skb, HCI_SCO_HDR_SIZE);
3083 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003084 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003085
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003086 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003087
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003089 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091
3092/* ---- HCI TX task (outgoing data) ---- */
3093
3094/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003095static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3096 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003097{
3098 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003099 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003100 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003102 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003104
3105 rcu_read_lock();
3106
3107 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003108 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003110
3111 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3112 continue;
3113
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114 num++;
3115
3116 if (c->sent < min) {
3117 min = c->sent;
3118 conn = c;
3119 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003120
3121 if (hci_conn_num(hdev, type) == num)
3122 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003123 }
3124
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003125 rcu_read_unlock();
3126
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003128 int cnt, q;
3129
3130 switch (conn->type) {
3131 case ACL_LINK:
3132 cnt = hdev->acl_cnt;
3133 break;
3134 case SCO_LINK:
3135 case ESCO_LINK:
3136 cnt = hdev->sco_cnt;
3137 break;
3138 case LE_LINK:
3139 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3140 break;
3141 default:
3142 cnt = 0;
3143 BT_ERR("Unknown link type");
3144 }
3145
3146 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147 *quote = q ? q : 1;
3148 } else
3149 *quote = 0;
3150
3151 BT_DBG("conn %p quote %d", conn, *quote);
3152 return conn;
3153}
3154
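/* Tear down connections of the given link type that still have
 * unacknowledged packets once the TX timeout has expired.
 */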
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003155static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156{
3157 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003158 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159
Ville Tervobae1f5d92011-02-10 22:38:53 -03003160 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003162 rcu_read_lock();
3163
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003165 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003166 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003167 BT_ERR("%s killing stalled connection %pMR",
3168 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003169 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170 }
3171 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003172
3173 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174}
3175
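/* Channel scheduler: pick the channel whose head skb has the highest
 * priority, breaking ties by the lowest number of packets sent on the
 * owning connection, and compute its TX quote.
 */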
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003176static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3177 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003178{
3179 struct hci_conn_hash *h = &hdev->conn_hash;
3180 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003181 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003182 struct hci_conn *conn;
3183 int cnt, q, conn_num = 0;
3184
3185 BT_DBG("%s", hdev->name);
3186
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003187 rcu_read_lock();
3188
3189 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003190 struct hci_chan *tmp;
3191
3192 if (conn->type != type)
3193 continue;
3194
3195 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3196 continue;
3197
3198 conn_num++;
3199
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003200 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003201 struct sk_buff *skb;
3202
3203 if (skb_queue_empty(&tmp->data_q))
3204 continue;
3205
3206 skb = skb_peek(&tmp->data_q);
3207 if (skb->priority < cur_prio)
3208 continue;
3209
3210 if (skb->priority > cur_prio) {
3211 num = 0;
3212 min = ~0;
3213 cur_prio = skb->priority;
3214 }
3215
3216 num++;
3217
3218 if (conn->sent < min) {
3219 min = conn->sent;
3220 chan = tmp;
3221 }
3222 }
3223
3224 if (hci_conn_num(hdev, type) == conn_num)
3225 break;
3226 }
3227
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003228 rcu_read_unlock();
3229
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003230 if (!chan)
3231 return NULL;
3232
3233 switch (chan->conn->type) {
3234 case ACL_LINK:
3235 cnt = hdev->acl_cnt;
3236 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003237 case AMP_LINK:
3238 cnt = hdev->block_cnt;
3239 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003240 case SCO_LINK:
3241 case ESCO_LINK:
3242 cnt = hdev->sco_cnt;
3243 break;
3244 case LE_LINK:
3245 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3246 break;
3247 default:
3248 cnt = 0;
3249 BT_ERR("Unknown link type");
3250 }
3251
3252 q = cnt / num;
3253 *quote = q ? q : 1;
3254 BT_DBG("chan %p quote %d", chan, *quote);
3255 return chan;
3256}
3257
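/* Promote starved channels: any channel of this type that sent nothing
 * in the last round gets the priority of its head skb raised to
 * HCI_PRIO_MAX - 1.
 */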
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003258static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3259{
3260 struct hci_conn_hash *h = &hdev->conn_hash;
3261 struct hci_conn *conn;
3262 int num = 0;
3263
3264 BT_DBG("%s", hdev->name);
3265
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003266 rcu_read_lock();
3267
3268 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003269 struct hci_chan *chan;
3270
3271 if (conn->type != type)
3272 continue;
3273
3274 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3275 continue;
3276
3277 num++;
3278
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003279 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003280 struct sk_buff *skb;
3281
3282 if (chan->sent) {
3283 chan->sent = 0;
3284 continue;
3285 }
3286
3287 if (skb_queue_empty(&chan->data_q))
3288 continue;
3289
3290 skb = skb_peek(&chan->data_q);
3291 if (skb->priority >= HCI_PRIO_MAX - 1)
3292 continue;
3293
3294 skb->priority = HCI_PRIO_MAX - 1;
3295
3296 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003297 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003298 }
3299
3300 if (hci_conn_num(hdev, type) == num)
3301 break;
3302 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003303
3304 rcu_read_unlock();
3305
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003306}
3307
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003308static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3309{
3310 /* Calculate count of blocks used by this packet */
3311 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3312}
3313
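/* If the controller has been out of buffers for longer than the ACL
 * TX timeout, kill the stalled ACL connections.
 */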
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003314static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316 if (!test_bit(HCI_RAW, &hdev->flags)) {
3317 /* ACL tx timeout must be longer than maximum
3318 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003319 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003320 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003321 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003323}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324
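/* Send queued ACL data under packet-based flow control, one
 * controller buffer per frame, until the quotes are exhausted.
 */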
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003325static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003326{
3327 unsigned int cnt = hdev->acl_cnt;
3328 struct hci_chan *chan;
3329 struct sk_buff *skb;
3330 int quote;
3331
3332 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003333
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003334 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003335 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003336 u32 priority = (skb_peek(&chan->data_q))->priority;
3337 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003338 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003339 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003340
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003341 /* Stop if priority has changed */
3342 if (skb->priority < priority)
3343 break;
3344
3345 skb = skb_dequeue(&chan->data_q);
3346
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003347 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003348 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003349
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003350 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 hdev->acl_last_tx = jiffies;
3352
3353 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003354 chan->sent++;
3355 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356 }
3357 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003358
3359 if (cnt != hdev->acl_cnt)
3360 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361}
3362
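/* Send queued ACL data under block-based flow control, charging each
 * frame the number of controller blocks it occupies.
 */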
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003363static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003364{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003365 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003366 struct hci_chan *chan;
3367 struct sk_buff *skb;
3368 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003369 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003370
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003371 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003372
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003373 BT_DBG("%s", hdev->name);
3374
3375 if (hdev->dev_type == HCI_AMP)
3376 type = AMP_LINK;
3377 else
3378 type = ACL_LINK;
3379
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003380 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003381 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003382 u32 priority = (skb_peek(&chan->data_q))->priority;
3383 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3384 int blocks;
3385
3386 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003387 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003388
3389 /* Stop if priority has changed */
3390 if (skb->priority < priority)
3391 break;
3392
3393 skb = skb_dequeue(&chan->data_q);
3394
3395 blocks = __get_blocks(hdev, skb);
3396 if (blocks > hdev->block_cnt)
3397 return;
3398
3399 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003400 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003401
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003402 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003403 hdev->acl_last_tx = jiffies;
3404
3405 hdev->block_cnt -= blocks;
3406 quote -= blocks;
3407
3408 chan->sent += blocks;
3409 chan->conn->sent += blocks;
3410 }
3411 }
3412
3413 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003414 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003415}
3416
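/* Dispatch ACL scheduling to the packet- or block-based variant,
 * depending on the controller's flow control mode.
 */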
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003417static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003418{
3419 BT_DBG("%s", hdev->name);
3420
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003421 /* No ACL link over BR/EDR controller */
3422 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3423 return;
3424
3425 /* No AMP link over AMP controller */
3426 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003427 return;
3428
3429 switch (hdev->flow_ctl_mode) {
3430 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3431 hci_sched_acl_pkt(hdev);
3432 break;
3433
3434 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3435 hci_sched_acl_blk(hdev);
3436 break;
3437 }
3438}
3439
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003441static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442{
3443 struct hci_conn *conn;
3444 struct sk_buff *skb;
3445 int quote;
3446
3447 BT_DBG("%s", hdev->name);
3448
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003449 if (!hci_conn_num(hdev, SCO_LINK))
3450 return;
3451
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3453 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3454 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003455 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456
3457 conn->sent++;
3458 if (conn->sent == ~0)
3459 conn->sent = 0;
3460 }
3461 }
3462}
3463
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003464static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003465{
3466 struct hci_conn *conn;
3467 struct sk_buff *skb;
3468 int quote;
3469
3470 BT_DBG("%s", hdev->name);
3471
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003472 if (!hci_conn_num(hdev, ESCO_LINK))
3473 return;
3474
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003475 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3476 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003477 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3478 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003479 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003480
3481 conn->sent++;
3482 if (conn->sent == ~0)
3483 conn->sent = 0;
3484 }
3485 }
3486}
3487
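/* Send queued LE data, drawing on the ACL buffer pool when the
 * controller has no dedicated LE buffers.
 */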
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003488static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003489{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003490 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003491 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003492 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003493
3494 BT_DBG("%s", hdev->name);
3495
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003496 if (!hci_conn_num(hdev, LE_LINK))
3497 return;
3498
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003499 if (!test_bit(HCI_RAW, &hdev->flags)) {
3500 /* LE tx timeout must be longer than maximum
3501 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003502 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003503 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003504 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003505 }
3506
3507 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003508 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003509 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003510 u32 priority = (skb_peek(&chan->data_q))->priority;
3511 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003512 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003513 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003514
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003515 /* Stop if priority has changed */
3516 if (skb->priority < priority)
3517 break;
3518
3519 skb = skb_dequeue(&chan->data_q);
3520
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003521 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003522 hdev->le_last_tx = jiffies;
3523
3524 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003525 chan->sent++;
3526 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003527 }
3528 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003529
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003530 if (hdev->le_pkts)
3531 hdev->le_cnt = cnt;
3532 else
3533 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003534
3535 if (cnt != tmp)
3536 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003537}
3538
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003539static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003541 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542 struct sk_buff *skb;
3543
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003544 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003545 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546
Marcel Holtmann52de5992013-09-03 18:08:38 -07003547 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3548 /* Schedule queues and send stuff to HCI driver */
3549 hci_sched_acl(hdev);
3550 hci_sched_sco(hdev);
3551 hci_sched_esco(hdev);
3552 hci_sched_le(hdev);
3553 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003554
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 /* Send next queued raw (unknown type) packet */
3556 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003557 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558}
3559
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003560/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561
3562/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003563static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564{
3565 struct hci_acl_hdr *hdr = (void *) skb->data;
3566 struct hci_conn *conn;
3567 __u16 handle, flags;
3568
3569 skb_pull(skb, HCI_ACL_HDR_SIZE);
3570
3571 handle = __le16_to_cpu(hdr->handle);
3572 flags = hci_flags(handle);
3573 handle = hci_handle(handle);
3574
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003575 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003576 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577
3578 hdev->stat.acl_rx++;
3579
3580 hci_dev_lock(hdev);
3581 conn = hci_conn_hash_lookup_handle(hdev, handle);
3582 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003583
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003585 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003586
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003588 l2cap_recv_acldata(conn, skb, flags);
3589 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003591 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003592 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593 }
3594
3595 kfree_skb(skb);
3596}
3597
3598/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003599static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600{
3601 struct hci_sco_hdr *hdr = (void *) skb->data;
3602 struct hci_conn *conn;
3603 __u16 handle;
3604
3605 skb_pull(skb, HCI_SCO_HDR_SIZE);
3606
3607 handle = __le16_to_cpu(hdr->handle);
3608
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003609 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610
3611 hdev->stat.sco_rx++;
3612
3613 hci_dev_lock(hdev);
3614 conn = hci_conn_hash_lookup_handle(hdev, handle);
3615 hci_dev_unlock(hdev);
3616
3617 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003619 sco_recv_scodata(conn, skb);
3620 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003622 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003623 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624 }
3625
3626 kfree_skb(skb);
3627}
3628
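/* The current request is complete once the next queued command
 * starts a new request (or the command queue is empty).
 */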
Johan Hedberg9238f362013-03-05 20:37:48 +02003629static bool hci_req_is_complete(struct hci_dev *hdev)
3630{
3631 struct sk_buff *skb;
3632
3633 skb = skb_peek(&hdev->cmd_q);
3634 if (!skb)
3635 return true;
3636
3637 return bt_cb(skb)->req.start;
3638}
3639
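/* Re-queue the last sent command, unless it was an HCI reset */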
Johan Hedberg42c6b122013-03-05 20:37:49 +02003640static void hci_resend_last(struct hci_dev *hdev)
3641{
3642 struct hci_command_hdr *sent;
3643 struct sk_buff *skb;
3644 u16 opcode;
3645
3646 if (!hdev->sent_cmd)
3647 return;
3648
3649 sent = (void *) hdev->sent_cmd->data;
3650 opcode = __le16_to_cpu(sent->opcode);
3651 if (opcode == HCI_OP_RESET)
3652 return;
3653
3654 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3655 if (!skb)
3656 return;
3657
3658 skb_queue_head(&hdev->cmd_q, skb);
3659 queue_work(hdev->workqueue, &hdev->cmd_work);
3660}
3661
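/* Run the request's complete callback when its last command has
 * completed or any command in it has failed, and flush the request's
 * remaining queued commands.
 */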
Johan Hedberg9238f362013-03-05 20:37:48 +02003662void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3663{
3664 hci_req_complete_t req_complete = NULL;
3665 struct sk_buff *skb;
3666 unsigned long flags;
3667
3668 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3669
Johan Hedberg42c6b122013-03-05 20:37:49 +02003670 /* If the completed command doesn't match the last one that was
3671 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003672 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003673 if (!hci_sent_cmd_data(hdev, opcode)) {
3674 /* Some CSR based controllers generate a spontaneous
3675 * reset complete event during init and any pending
3676 * command will never be completed. In such a case we
3677 * need to resend whatever was the last sent
3678 * command.
3679 */
3680 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3681 hci_resend_last(hdev);
3682
Johan Hedberg9238f362013-03-05 20:37:48 +02003683 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003684 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003685
3686	/* If the command succeeded and there are still more commands in
3687	 * this request, the request is not yet complete.
3688 */
3689 if (!status && !hci_req_is_complete(hdev))
3690 return;
3691
3692 /* If this was the last command in a request the complete
3693 * callback would be found in hdev->sent_cmd instead of the
3694 * command queue (hdev->cmd_q).
3695 */
3696 if (hdev->sent_cmd) {
3697 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003698
3699 if (req_complete) {
3700 /* We must set the complete callback to NULL to
3701 * avoid calling the callback more than once if
3702 * this function gets called again.
3703 */
3704 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3705
Johan Hedberg9238f362013-03-05 20:37:48 +02003706 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003707 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003708 }
3709
3710 /* Remove all pending commands belonging to this request */
3711 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3712 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3713 if (bt_cb(skb)->req.start) {
3714 __skb_queue_head(&hdev->cmd_q, skb);
3715 break;
3716 }
3717
3718 req_complete = bt_cb(skb)->req.complete;
3719 kfree_skb(skb);
3720 }
3721 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3722
3723call_complete:
3724 if (req_complete)
3725 req_complete(hdev, status);
3726}
3727
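/* RX work: drain hdev->rx_q, mirroring each frame to the monitor and
 * to raw sockets in promiscuous mode, then dispatch it by packet type.
 */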
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003728static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003730 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731 struct sk_buff *skb;
3732
3733 BT_DBG("%s", hdev->name);
3734
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003736 /* Send copy to monitor */
3737 hci_send_to_monitor(hdev, skb);
3738
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739 if (atomic_read(&hdev->promisc)) {
3740 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003741 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742 }
3743
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003744 if (test_bit(HCI_RAW, &hdev->flags) ||
3745 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746 kfree_skb(skb);
3747 continue;
3748 }
3749
3750 if (test_bit(HCI_INIT, &hdev->flags)) {
3751			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003752 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753 case HCI_ACLDATA_PKT:
3754 case HCI_SCODATA_PKT:
3755 kfree_skb(skb);
3756 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003757 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758 }
3759
3760 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003761 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003763 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764 hci_event_packet(hdev, skb);
3765 break;
3766
3767 case HCI_ACLDATA_PKT:
3768 BT_DBG("%s ACL data packet", hdev->name);
3769 hci_acldata_packet(hdev, skb);
3770 break;
3771
3772 case HCI_SCODATA_PKT:
3773 BT_DBG("%s SCO data packet", hdev->name);
3774 hci_scodata_packet(hdev, skb);
3775 break;
3776
3777 default:
3778 kfree_skb(skb);
3779 break;
3780 }
3781 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782}
3783
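/* Command work: if the controller has command credit, send the next
 * queued HCI command, keep a clone in hdev->sent_cmd and arm the
 * command timeout timer.
 */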
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003784static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003786 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787 struct sk_buff *skb;
3788
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003789 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3790 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003793 if (atomic_read(&hdev->cmd_cnt)) {
3794 skb = skb_dequeue(&hdev->cmd_q);
3795 if (!skb)
3796 return;
3797
Wei Yongjun7585b972009-02-25 18:29:52 +08003798 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003800 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003801 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003803 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003804 if (test_bit(HCI_RESET, &hdev->flags))
3805 del_timer(&hdev->cmd_timer);
3806 else
3807 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003808 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809 } else {
3810 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003811 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812 }
3813 }
3814}