/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

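/* These entries appear under the controller's debugfs directory, e.g.
 * /sys/kernel/debug/bluetooth/hci0/voice_setting on a typical system; they
 * are created in __hci_init() below, once, during the initial setup phase.
 */
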
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

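/* hci_get_cmd_complete() consumes hdev->recv_evt: the headers are pulled off,
 * so on success the caller gets an skb holding just the parameters of the
 * requested event (or of the matching Command Complete). Anything else is
 * freed and reported as -ENODATA.
 */
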
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

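/* A minimal usage sketch, not part of this file: a driver's setup() callback
 * could read the controller version synchronously like this. The opcode,
 * response struct and timeout are the real definitions from hci.h; the
 * function name is hypothetical.
 *
 *	static int example_setup(struct hci_dev *hdev)
 *	{
 *		struct hci_rp_read_local_version *rp;
 *		struct sk_buff *skb;
 *
 *		skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *				     HCI_INIT_TIMEOUT);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		rp = (struct hci_rp_read_local_version *) skb->data;
 *		BT_INFO("HCI version 0x%2.2x", rp->hci_ver);
 *
 *		kfree_skb(skb);
 *		return 0;
 *	}
 */
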
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

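/* Note the split: __hci_req_sync() above expects the caller to already hold
 * hci_req_lock() (as the open/close paths do), while hci_req_sync() is the
 * self-contained variant that takes and drops the lock around it.
 */
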
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
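        /* The timeout is in 0.625 ms baseband slots:
         * 0x7d00 = 32000 slots * 0.625 ms = 20000 ms, hence "~20 secs".
         */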

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

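/* The value returned above becomes the Write Inquiry Mode parameter:
 * 0x00 standard inquiry results, 0x01 results with RSSI, 0x02 results with
 * RSSI or extended (EIR) format. The manufacturer/revision special cases
 * cover controllers known to handle RSSI results without advertising the
 * corresponding LMP feature bit.
 */
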
static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

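/* The LE mask 0x1f set above enables the five LE meta events defined in
 * Bluetooth 4.0: LE Connection Complete, LE Advertising Report, LE
 * Connection Update Complete, LE Read Remote Used Features Complete and
 * LE Long Term Key Request.
 */
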
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

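/* The HCI_LP_* bits correspond one-to-one to the Write Default Link Policy
 * Settings bitfield (role switch, hold, sniff, park), and each is set only
 * when the matching LMP feature is present, since a controller may reject a
 * default policy that names modes it does not support.
 */
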
static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev))
                hci_set_le_support(req);

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev))
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);

        if (lmp_le_capable(hdev))
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);

        return 0;
}

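/* Controller bring-up is thus four synchronous request batches: stage 1
 * resets and identifies the controller, stages 2-4 progressively configure
 * it based on what the earlier responses reported. All of them run while
 * hci_dev_do_open() below has the HCI_INIT flag set.
 */
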
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

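/* The discovery cache keeps every result on the "all" list; entries whose
 * remote name is still unknown are additionally linked on "unknown", and
 * entries queued for remote name resolution on "resolve". Flushing frees
 * the entries through "all" and reinitializes the two auxiliary lists.
 */
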
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

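/* The resolve list is kept sorted by ascending abs(rssi), i.e. strongest
 * signal first (RSSI is a negative dBm value), so that remote names are
 * resolved for the closest devices before the weaker ones; entries whose
 * resolution is already in flight (NAME_PENDING) are never displaced.
 */
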
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

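/* A minimal userspace sketch of this ioctl, assuming the hci.h definitions
 * visible to userspace (HCIINQUIRY, struct hci_inquiry_req; exact type names
 * vary between the kernel header and libbluetooth). The buffer is the request
 * header immediately followed by the inquiry_info array filled in above:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;                            // hci0
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	memcpy(buf.ir.lap, "\x33\x8b\x9e", 3);         // GIAC
 *	buf.ir.length  = 8;                            // 8 * 1.28 s
 *	buf.ir.num_rsp = 255;
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (dd < 0 || ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */
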
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, clean up */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001355/* ---- HCI ioctl helpers ---- */
1356
1357int hci_dev_open(__u16 dev)
1358{
1359 struct hci_dev *hdev;
1360 int err;
1361
1362 hdev = hci_dev_get(dev);
1363 if (!hdev)
1364 return -ENODEV;
1365
Johan Hedberge1d08f42013-10-01 22:44:50 +03001366 /* We need to ensure that no other power on/off work is pending
1367 * before proceeding to call hci_dev_do_open. This is
1368 * particularly important if the setup procedure has not yet
1369 * completed.
1370 */
1371 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1372 cancel_delayed_work(&hdev->power_off);
1373
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001374 /* After this call it is guaranteed that the setup procedure
1375 * has finished. This means that error conditions like RFKILL
1376 * or no valid public or static random address apply.
1377 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001378 flush_workqueue(hdev->req_workqueue);
1379
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001380 err = hci_dev_do_open(hdev);
1381
1382 hci_dev_put(hdev);
1383
1384 return err;
1385}
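/* Illustrative sketch (not part of this file): the HCI socket ioctl
 * handler is the typical caller of this helper, and conceptually its
 * HCIDEVUP case reduces to:
 *
 *	case HCIDEVUP:
 *		return hci_dev_open(arg);
 *
 * The heavy lifting (pending power work, setup synchronization and
 * the actual open) all happens in the helpers above.
 */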
1386
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387static int hci_dev_do_close(struct hci_dev *hdev)
1388{
1389 BT_DBG("%s %p", hdev->name, hdev);
1390
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001391 cancel_delayed_work(&hdev->power_off);
1392
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 hci_req_cancel(hdev, ENODEV);
1394 hci_req_lock(hdev);
1395
1396 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001397 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 hci_req_unlock(hdev);
1399 return 0;
1400 }
1401
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001402	/* Flush RX and TX work items */
1403 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001404 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001406 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001407 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001408 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001409 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07001410 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001411 }
1412
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001413 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001414 cancel_delayed_work(&hdev->service_cache);
1415
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001416 cancel_delayed_work_sync(&hdev->le_scan_disable);
1417
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001418 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001419 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001421 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
1423 hci_notify(hdev, HCI_DEV_DOWN);
1424
1425 if (hdev->flush)
1426 hdev->flush(hdev);
1427
1428 /* Reset device */
1429 skb_queue_purge(&hdev->cmd_q);
1430 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001431 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07001432 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001433 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001435 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 clear_bit(HCI_INIT, &hdev->flags);
1437 }
1438
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001439 /* flush cmd work */
1440 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441
1442 /* Drop queues */
1443 skb_queue_purge(&hdev->rx_q);
1444 skb_queue_purge(&hdev->cmd_q);
1445 skb_queue_purge(&hdev->raw_q);
1446
1447 /* Drop last sent command */
1448 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001449 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 kfree_skb(hdev->sent_cmd);
1451 hdev->sent_cmd = NULL;
1452 }
1453
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001454 kfree_skb(hdev->recv_evt);
1455 hdev->recv_evt = NULL;
1456
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 /* After this point our queues are empty
1458 * and no tasks are scheduled. */
1459 hdev->close(hdev);
1460
Johan Hedberg35b973c2013-03-15 17:06:59 -05001461 /* Clear flags */
1462 hdev->flags = 0;
1463 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1464
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001465 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1466 if (hdev->dev_type == HCI_BREDR) {
1467 hci_dev_lock(hdev);
1468 mgmt_powered(hdev, 0);
1469 hci_dev_unlock(hdev);
1470 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001471 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001472
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001473 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001474 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001475
Johan Hedberge59fda82012-02-22 18:11:53 +02001476 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001477 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001478
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 hci_req_unlock(hdev);
1480
1481 hci_dev_put(hdev);
1482 return 0;
1483}
1484
1485int hci_dev_close(__u16 dev)
1486{
1487 struct hci_dev *hdev;
1488 int err;
1489
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001490 hdev = hci_dev_get(dev);
1491 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001493
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001494 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1495 err = -EBUSY;
1496 goto done;
1497 }
1498
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001499 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1500 cancel_delayed_work(&hdev->power_off);
1501
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001503
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001504done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 hci_dev_put(hdev);
1506 return err;
1507}
1508
1509int hci_dev_reset(__u16 dev)
1510{
1511 struct hci_dev *hdev;
1512 int ret = 0;
1513
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001514 hdev = hci_dev_get(dev);
1515 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 return -ENODEV;
1517
1518 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519
Marcel Holtmann808a0492013-08-26 20:57:58 -07001520 if (!test_bit(HCI_UP, &hdev->flags)) {
1521 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001523 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001525 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1526 ret = -EBUSY;
1527 goto done;
1528 }
1529
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 /* Drop queues */
1531 skb_queue_purge(&hdev->rx_q);
1532 skb_queue_purge(&hdev->cmd_q);
1533
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001534 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001535 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001537 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538
1539 if (hdev->flush)
1540 hdev->flush(hdev);
1541
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001542 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001543 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544
1545 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001546 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547
1548done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 hci_req_unlock(hdev);
1550 hci_dev_put(hdev);
1551 return ret;
1552}
1553
1554int hci_dev_reset_stat(__u16 dev)
1555{
1556 struct hci_dev *hdev;
1557 int ret = 0;
1558
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001559 hdev = hci_dev_get(dev);
1560 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 return -ENODEV;
1562
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001563 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1564 ret = -EBUSY;
1565 goto done;
1566 }
1567
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1569
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001570done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 return ret;
1573}
1574
1575int hci_dev_cmd(unsigned int cmd, void __user *arg)
1576{
1577 struct hci_dev *hdev;
1578 struct hci_dev_req dr;
1579 int err = 0;
1580
1581 if (copy_from_user(&dr, arg, sizeof(dr)))
1582 return -EFAULT;
1583
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001584 hdev = hci_dev_get(dr.dev_id);
1585 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 return -ENODEV;
1587
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001588 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1589 err = -EBUSY;
1590 goto done;
1591 }
1592
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001593 if (hdev->dev_type != HCI_BREDR) {
1594 err = -EOPNOTSUPP;
1595 goto done;
1596 }
1597
Johan Hedberg56f87902013-10-02 13:43:13 +03001598 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1599 err = -EOPNOTSUPP;
1600 goto done;
1601 }
1602
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 switch (cmd) {
1604 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001605 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1606 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 break;
1608
1609 case HCISETENCRYPT:
1610 if (!lmp_encrypt_capable(hdev)) {
1611 err = -EOPNOTSUPP;
1612 break;
1613 }
1614
1615 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1616 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001617 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1618 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 if (err)
1620 break;
1621 }
1622
Johan Hedberg01178cd2013-03-05 20:37:41 +02001623 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1624 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 break;
1626
1627 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001628 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1629 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 break;
1631
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001632 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001633 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1634 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001635 break;
1636
1637 case HCISETLINKMODE:
1638 hdev->link_mode = ((__u16) dr.dev_opt) &
1639 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1640 break;
1641
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 case HCISETPTYPE:
1643 hdev->pkt_type = (__u16) dr.dev_opt;
1644 break;
1645
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001647 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1648 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649 break;
1650
1651 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001652 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1653 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 break;
1655
1656 default:
1657 err = -EINVAL;
1658 break;
1659 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001660
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001661done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 hci_dev_put(hdev);
1663 return err;
1664}
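/* Illustrative sketch (not part of this file): dev_opt is a single
 * __u32, and HCISETACLMTU/HCISETSCOMTU overlay two __u16 values on
 * it, packet count in the low half and MTU in the high half (native
 * byte order, as the casts above show). A hypothetical little-endian
 * userspace caller setting an ACL MTU of 1021 with 8 packets would do:
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = (1021 << 16) | 8;	// mtu = 1021, pkts = 8
 *	ioctl(sk, HCISETACLMTU, &dr);
 */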
1665
1666int hci_get_dev_list(void __user *arg)
1667{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001668 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 struct hci_dev_list_req *dl;
1670 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 int n = 0, size, err;
1672 __u16 dev_num;
1673
1674 if (get_user(dev_num, (__u16 __user *) arg))
1675 return -EFAULT;
1676
1677 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1678 return -EINVAL;
1679
1680 size = sizeof(*dl) + dev_num * sizeof(*dr);
1681
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001682 dl = kzalloc(size, GFP_KERNEL);
1683 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 return -ENOMEM;
1685
1686 dr = dl->dev_req;
1687
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001688 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001689 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001690 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001691 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001692
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001693 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1694 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001695
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 (dr + n)->dev_id = hdev->id;
1697 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001698
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 if (++n >= dev_num)
1700 break;
1701 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001702 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
1704 dl->dev_num = n;
1705 size = sizeof(*dl) + n * sizeof(*dr);
1706
1707 err = copy_to_user(arg, dl, size);
1708 kfree(dl);
1709
1710 return err ? -EFAULT : 0;
1711}
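/* Illustrative sketch (not part of this file): userspace sizes its
 * HCIGETDEVLIST buffer the same way this helper does, one header plus
 * dev_num request slots (hypothetical caller, error handling omitted):
 *
 *	__u16 dev_num = 16;
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + dev_num * sizeof(struct hci_dev_req));
 *	dl->dev_num = dev_num;
 *	ioctl(sk, HCIGETDEVLIST, dl);
 */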
1712
1713int hci_get_dev_info(void __user *arg)
1714{
1715 struct hci_dev *hdev;
1716 struct hci_dev_info di;
1717 int err = 0;
1718
1719 if (copy_from_user(&di, arg, sizeof(di)))
1720 return -EFAULT;
1721
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001722 hdev = hci_dev_get(di.dev_id);
1723 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 return -ENODEV;
1725
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001726 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001727 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001728
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001729 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1730 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001731
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 strcpy(di.name, hdev->name);
1733 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001734 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 di.flags = hdev->flags;
1736 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001737 if (lmp_bredr_capable(hdev)) {
1738 di.acl_mtu = hdev->acl_mtu;
1739 di.acl_pkts = hdev->acl_pkts;
1740 di.sco_mtu = hdev->sco_mtu;
1741 di.sco_pkts = hdev->sco_pkts;
1742 } else {
1743 di.acl_mtu = hdev->le_mtu;
1744 di.acl_pkts = hdev->le_pkts;
1745 di.sco_mtu = 0;
1746 di.sco_pkts = 0;
1747 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 di.link_policy = hdev->link_policy;
1749 di.link_mode = hdev->link_mode;
1750
1751 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1752 memcpy(&di.features, &hdev->features, sizeof(di.features));
1753
1754 if (copy_to_user(arg, &di, sizeof(di)))
1755 err = -EFAULT;
1756
1757 hci_dev_put(hdev);
1758
1759 return err;
1760}
1761
1762/* ---- Interface to HCI drivers ---- */
1763
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001764static int hci_rfkill_set_block(void *data, bool blocked)
1765{
1766 struct hci_dev *hdev = data;
1767
1768 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1769
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001770 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1771 return -EBUSY;
1772
Johan Hedberg5e130362013-09-13 08:58:17 +03001773 if (blocked) {
1774 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001775 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1776 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001777 } else {
1778 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001779 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001780
1781 return 0;
1782}
1783
1784static const struct rfkill_ops hci_rfkill_ops = {
1785 .set_block = hci_rfkill_set_block,
1786};
1787
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001788static void hci_power_on(struct work_struct *work)
1789{
1790 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001791 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001792
1793 BT_DBG("%s", hdev->name);
1794
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001795 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001796 if (err < 0) {
1797 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001798 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001799 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001800
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001801 /* During the HCI setup phase, a few error conditions are
1802 * ignored and they need to be checked now. If they are still
1803 * valid, it is important to turn the device back off.
1804 */
1805 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1806 (hdev->dev_type == HCI_BREDR &&
1807 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1808 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03001809 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1810 hci_dev_do_close(hdev);
1811 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02001812 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1813 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03001814 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001815
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001816 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001817 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001818}
1819
1820static void hci_power_off(struct work_struct *work)
1821{
Johan Hedberg32435532011-11-07 22:16:04 +02001822 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001823 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001824
1825 BT_DBG("%s", hdev->name);
1826
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001827 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001828}
1829
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001830static void hci_discov_off(struct work_struct *work)
1831{
1832 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001833
1834 hdev = container_of(work, struct hci_dev, discov_off.work);
1835
1836 BT_DBG("%s", hdev->name);
1837
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07001838 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001839}
1840
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001841int hci_uuids_clear(struct hci_dev *hdev)
1842{
Johan Hedberg48210022013-01-27 00:31:28 +02001843 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001844
Johan Hedberg48210022013-01-27 00:31:28 +02001845 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1846 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001847 kfree(uuid);
1848 }
1849
1850 return 0;
1851}
1852
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001853int hci_link_keys_clear(struct hci_dev *hdev)
1854{
1855 struct list_head *p, *n;
1856
1857 list_for_each_safe(p, n, &hdev->link_keys) {
1858 struct link_key *key;
1859
1860 key = list_entry(p, struct link_key, list);
1861
1862 list_del(p);
1863 kfree(key);
1864 }
1865
1866 return 0;
1867}
1868
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001869int hci_smp_ltks_clear(struct hci_dev *hdev)
1870{
1871 struct smp_ltk *k, *tmp;
1872
1873 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1874 list_del(&k->list);
1875 kfree(k);
1876 }
1877
1878 return 0;
1879}
1880
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001881struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1882{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001883 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001884
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001885 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001886 if (bacmp(bdaddr, &k->bdaddr) == 0)
1887 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001888
1889 return NULL;
1890}
1891
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301892static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001893 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001894{
1895 /* Legacy key */
1896 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301897 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001898
1899 /* Debug keys are insecure so don't store them persistently */
1900 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301901 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001902
1903 /* Changed combination key and there's no previous one */
1904 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301905 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001906
1907 /* Security mode 3 case */
1908 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301909 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001910
1911 /* Neither local nor remote side had no-bonding as requirement */
1912 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301913 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001914
1915 /* Local side had dedicated bonding as requirement */
1916 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301917 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001918
1919 /* Remote side had dedicated bonding as requirement */
1920 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301921 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001922
1923 /* If none of the above criteria match, then don't store the key
1924 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301925 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001926}
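/* Worked example of the rules above (commentary only): an
 * unauthenticated combination key (type 0x04) from a pairing where
 * either side required dedicated bonding (auth 0x02/0x03) is stored
 * persistently, while the same key type from a pairing where both
 * sides used no-bonding (auth 0x00/0x01) falls through every check
 * and lives only as long as the connection.
 */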
1927
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001928struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001929{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001930 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001931
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001932 list_for_each_entry(k, &hdev->long_term_keys, list) {
1933 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001934 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001935 continue;
1936
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001937 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001938 }
1939
1940 return NULL;
1941}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001942
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001943struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001944 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001945{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001946 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001947
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001948 list_for_each_entry(k, &hdev->long_term_keys, list)
1949 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001950 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001951 return k;
1952
1953 return NULL;
1954}
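/* Illustrative sketch (not part of this file): SMP uses this lookup
 * when deciding whether an LE connection can be encrypted with a
 * stored key, roughly (hcon is a hypothetical LE hci_conn):
 *
 *	key = hci_find_ltk_by_addr(hdev, &hcon->dst, hcon->dst_type);
 *	if (!key)
 *		return 0;
 */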
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001955
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001956int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001957 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001958{
1959 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301960 u8 old_key_type;
1961 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001962
1963 old_key = hci_find_link_key(hdev, bdaddr);
1964 if (old_key) {
1965 old_key_type = old_key->type;
1966 key = old_key;
1967 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001968 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001969 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1970 if (!key)
1971 return -ENOMEM;
1972 list_add(&key->list, &hdev->link_keys);
1973 }
1974
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001975 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001976
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001977 /* Some buggy controller combinations generate a changed
1978 * combination key for legacy pairing even when there's no
1979 * previous key */
1980 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001981 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001982 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001983 if (conn)
1984 conn->key_type = type;
1985 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001986
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001987 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001988 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001989 key->pin_len = pin_len;
1990
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001991 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001992 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001993 else
1994 key->type = type;
1995
Johan Hedberg4df378a2011-04-28 11:29:03 -07001996 if (!new_key)
1997 return 0;
1998
1999 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2000
Johan Hedberg744cf192011-11-08 20:40:14 +02002001 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002002
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302003 if (conn)
2004 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002005
2006 return 0;
2007}
2008
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002009int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002010 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002011 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002012{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002013 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002014
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002015 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2016 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002017
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002018 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2019 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002020 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002021 else {
2022 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002023 if (!key)
2024 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002025 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002026 }
2027
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002028 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002029 key->bdaddr_type = addr_type;
2030 memcpy(key->val, tk, sizeof(key->val));
2031 key->authenticated = authenticated;
2032 key->ediv = ediv;
2033 key->enc_size = enc_size;
2034 key->type = type;
2035 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002036
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002037 if (!new_key)
2038 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002039
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002040 if (type & HCI_SMP_LTK)
2041 mgmt_new_ltk(hdev, key, 1);
2042
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002043 return 0;
2044}
2045
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002046int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2047{
2048 struct link_key *key;
2049
2050 key = hci_find_link_key(hdev, bdaddr);
2051 if (!key)
2052 return -ENOENT;
2053
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002054 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002055
2056 list_del(&key->list);
2057 kfree(key);
2058
2059 return 0;
2060}
2061
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002062int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2063{
2064 struct smp_ltk *k, *tmp;
2065
2066 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2067 if (bacmp(bdaddr, &k->bdaddr))
2068 continue;
2069
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002070 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002071
2072 list_del(&k->list);
2073 kfree(k);
2074 }
2075
2076 return 0;
2077}
2078
Ville Tervo6bd32322011-02-16 16:32:41 +02002079/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002080static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002081{
2082 struct hci_dev *hdev = (void *) arg;
2083
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002084 if (hdev->sent_cmd) {
2085 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2086 u16 opcode = __le16_to_cpu(sent->opcode);
2087
2088 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2089 } else {
2090 BT_ERR("%s command tx timeout", hdev->name);
2091 }
2092
Ville Tervo6bd32322011-02-16 16:32:41 +02002093 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002094 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002095}
2096
Szymon Janc2763eda2011-03-22 13:12:22 +01002097struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002098 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002099{
2100 struct oob_data *data;
2101
2102 list_for_each_entry(data, &hdev->remote_oob_data, list)
2103 if (bacmp(bdaddr, &data->bdaddr) == 0)
2104 return data;
2105
2106 return NULL;
2107}
2108
2109int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2110{
2111 struct oob_data *data;
2112
2113 data = hci_find_remote_oob_data(hdev, bdaddr);
2114 if (!data)
2115 return -ENOENT;
2116
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002117 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002118
2119 list_del(&data->list);
2120 kfree(data);
2121
2122 return 0;
2123}
2124
2125int hci_remote_oob_data_clear(struct hci_dev *hdev)
2126{
2127 struct oob_data *data, *n;
2128
2129 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2130 list_del(&data->list);
2131 kfree(data);
2132 }
2133
2134 return 0;
2135}
2136
2137int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002138 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002139{
2140 struct oob_data *data;
2141
2142 data = hci_find_remote_oob_data(hdev, bdaddr);
2143
2144 if (!data) {
2145 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2146 if (!data)
2147 return -ENOMEM;
2148
2149 bacpy(&data->bdaddr, bdaddr);
2150 list_add(&data->list, &hdev->remote_oob_data);
2151 }
2152
2153 memcpy(data->hash, hash, sizeof(data->hash));
2154 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2155
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002156 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002157
2158 return 0;
2159}
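/* Illustrative sketch (not part of this file): mgmt's Add Remote OOB
 * Data command feeds this helper directly with the hash and
 * randomizer supplied by userspace, roughly:
 *
 *	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
 *				      cp->hash, cp->randomizer);
 */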
2160
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002161struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2162 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002163{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002164 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002165
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002166 list_for_each_entry(b, &hdev->blacklist, list) {
2167 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002168 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002169 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002170
2171 return NULL;
2172}
2173
2174int hci_blacklist_clear(struct hci_dev *hdev)
2175{
2176 struct list_head *p, *n;
2177
2178 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002179 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002180
2181 list_del(p);
2182 kfree(b);
2183 }
2184
2185 return 0;
2186}
2187
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002188int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002189{
2190 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002191
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002192 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002193 return -EBADF;
2194
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002195 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002196 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002197
2198 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002199 if (!entry)
2200 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002201
2202 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002203 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002204
2205 list_add(&entry->list, &hdev->blacklist);
2206
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002207 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002208}
2209
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002210int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002211{
2212 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002213
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002214 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002215 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002216
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002217 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002218 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002219 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002220
2221 list_del(&entry->list);
2222 kfree(entry);
2223
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002224 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002225}
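/* Illustrative sketch (not part of this file): the mgmt Block Device
 * and Unblock Device commands are the typical callers; with the
 * device lock held they reduce to something like:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
 *	hci_dev_unlock(hdev);
 */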
2226
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002227static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002228{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002229 if (status) {
2230 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002231
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002232 hci_dev_lock(hdev);
2233 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2234 hci_dev_unlock(hdev);
2235 return;
2236 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002237}
2238
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002239static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002240{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002241 /* General inquiry access code (GIAC) */
2242 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2243 struct hci_request req;
2244 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002245 int err;
2246
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002247 if (status) {
2248 BT_ERR("Failed to disable LE scanning: status %d", status);
2249 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002250 }
2251
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002252 switch (hdev->discovery.type) {
2253 case DISCOV_TYPE_LE:
2254 hci_dev_lock(hdev);
2255 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2256 hci_dev_unlock(hdev);
2257 break;
2258
2259 case DISCOV_TYPE_INTERLEAVED:
2260 hci_req_init(&req, hdev);
2261
2262 memset(&cp, 0, sizeof(cp));
2263 memcpy(&cp.lap, lap, sizeof(cp.lap));
2264 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2265 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2266
2267 hci_dev_lock(hdev);
2268
2269 hci_inquiry_cache_flush(hdev);
2270
2271 err = hci_req_run(&req, inquiry_complete);
2272 if (err) {
2273 BT_ERR("Inquiry request failed: err %d", err);
2274 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2275 }
2276
2277 hci_dev_unlock(hdev);
2278 break;
2279 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002280}
2281
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002282static void le_scan_disable_work(struct work_struct *work)
2283{
2284 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002285 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002286 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002287 struct hci_request req;
2288 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002289
2290 BT_DBG("%s", hdev->name);
2291
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002292 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002293
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002294 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002295 cp.enable = LE_SCAN_DISABLE;
2296 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002297
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002298 err = hci_req_run(&req, le_scan_disable_work_complete);
2299 if (err)
2300 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002301}
2302
David Herrmann9be0dab2012-04-22 14:39:57 +02002303/* Alloc HCI device */
2304struct hci_dev *hci_alloc_dev(void)
2305{
2306 struct hci_dev *hdev;
2307
2308 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2309 if (!hdev)
2310 return NULL;
2311
David Herrmannb1b813d2012-04-22 14:39:58 +02002312 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2313 hdev->esco_type = (ESCO_HV1);
2314 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002315 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2316 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002317 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2318 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002319
David Herrmannb1b813d2012-04-22 14:39:58 +02002320 hdev->sniff_max_interval = 800;
2321 hdev->sniff_min_interval = 80;
2322
Marcel Holtmannbef64732013-10-11 08:23:19 -07002323 hdev->le_scan_interval = 0x0060;
2324 hdev->le_scan_window = 0x0030;
2325
David Herrmannb1b813d2012-04-22 14:39:58 +02002326 mutex_init(&hdev->lock);
2327 mutex_init(&hdev->req_lock);
2328
2329 INIT_LIST_HEAD(&hdev->mgmt_pending);
2330 INIT_LIST_HEAD(&hdev->blacklist);
2331 INIT_LIST_HEAD(&hdev->uuids);
2332 INIT_LIST_HEAD(&hdev->link_keys);
2333 INIT_LIST_HEAD(&hdev->long_term_keys);
2334 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002335 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002336
2337 INIT_WORK(&hdev->rx_work, hci_rx_work);
2338 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2339 INIT_WORK(&hdev->tx_work, hci_tx_work);
2340 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002341
David Herrmannb1b813d2012-04-22 14:39:58 +02002342 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2343 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2344 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2345
David Herrmannb1b813d2012-04-22 14:39:58 +02002346 skb_queue_head_init(&hdev->rx_q);
2347 skb_queue_head_init(&hdev->cmd_q);
2348 skb_queue_head_init(&hdev->raw_q);
2349
2350 init_waitqueue_head(&hdev->req_wait_q);
2351
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002352 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002353
David Herrmannb1b813d2012-04-22 14:39:58 +02002354 hci_init_sysfs(hdev);
2355 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002356
2357 return hdev;
2358}
2359EXPORT_SYMBOL(hci_alloc_dev);
2360
2361/* Free HCI device */
2362void hci_free_dev(struct hci_dev *hdev)
2363{
David Herrmann9be0dab2012-04-22 14:39:57 +02002364 /* will free via device release */
2365 put_device(&hdev->dev);
2366}
2367EXPORT_SYMBOL(hci_free_dev);
2368
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369/* Register HCI device */
2370int hci_register_dev(struct hci_dev *hdev)
2371{
David Herrmannb1b813d2012-04-22 14:39:58 +02002372 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373
David Herrmann010666a2012-01-07 15:47:07 +01002374 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 return -EINVAL;
2376
Mat Martineau08add512011-11-02 16:18:36 -07002377 /* Do not allow HCI_AMP devices to register at index 0,
2378 * so the index can be used as the AMP controller ID.
2379 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002380 switch (hdev->dev_type) {
2381 case HCI_BREDR:
2382 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2383 break;
2384 case HCI_AMP:
2385 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2386 break;
2387 default:
2388 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002390
Sasha Levin3df92b32012-05-27 22:36:56 +02002391 if (id < 0)
2392 return id;
2393
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 sprintf(hdev->name, "hci%d", id);
2395 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002396
2397 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2398
Kees Cookd8537542013-07-03 15:04:57 -07002399 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2400 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002401 if (!hdev->workqueue) {
2402 error = -ENOMEM;
2403 goto err;
2404 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002405
Kees Cookd8537542013-07-03 15:04:57 -07002406 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2407 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002408 if (!hdev->req_workqueue) {
2409 destroy_workqueue(hdev->workqueue);
2410 error = -ENOMEM;
2411 goto err;
2412 }
2413
David Herrmann33ca9542011-10-08 14:58:49 +02002414 error = hci_add_sysfs(hdev);
2415 if (error < 0)
2416 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002418 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002419 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2420 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002421 if (hdev->rfkill) {
2422 if (rfkill_register(hdev->rfkill) < 0) {
2423 rfkill_destroy(hdev->rfkill);
2424 hdev->rfkill = NULL;
2425 }
2426 }
2427
Johan Hedberg5e130362013-09-13 08:58:17 +03002428 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2429 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2430
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002431 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002432 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002433
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002434 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002435 /* Assume BR/EDR support until proven otherwise (such as
2436	 * through reading supported features during init).
2437 */
2438 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2439 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002440
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002441 write_lock(&hci_dev_list_lock);
2442 list_add(&hdev->list, &hci_dev_list);
2443 write_unlock(&hci_dev_list_lock);
2444
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002446 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447
Johan Hedberg19202572013-01-14 22:33:51 +02002448 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002449
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002451
David Herrmann33ca9542011-10-08 14:58:49 +02002452err_wqueue:
2453 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002454 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002455err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002456 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002457
David Herrmann33ca9542011-10-08 14:58:49 +02002458 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459}
2460EXPORT_SYMBOL(hci_register_dev);
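/* Illustrative sketch (not part of this file): a transport driver
 * typically allocates, fills in the mandatory callbacks and registers
 * in its probe path; my_open/my_close/my_send are hypothetical driver
 * hooks:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */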
2461
2462/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002463void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464{
Sasha Levin3df92b32012-05-27 22:36:56 +02002465 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002466
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002467 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468
Johan Hovold94324962012-03-15 14:48:41 +01002469 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2470
Sasha Levin3df92b32012-05-27 22:36:56 +02002471 id = hdev->id;
2472
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002473 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002475 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476
2477 hci_dev_do_close(hdev);
2478
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302479 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002480 kfree_skb(hdev->reassembly[i]);
2481
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002482 cancel_work_sync(&hdev->power_on);
2483
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002484 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002485 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002486 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002487 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002488 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002489 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002490
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002491 /* mgmt_index_removed should take care of emptying the
2492 * pending list */
2493 BUG_ON(!list_empty(&hdev->mgmt_pending));
2494
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 hci_notify(hdev, HCI_DEV_UNREG);
2496
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002497 if (hdev->rfkill) {
2498 rfkill_unregister(hdev->rfkill);
2499 rfkill_destroy(hdev->rfkill);
2500 }
2501
David Herrmannce242972011-10-08 14:58:48 +02002502 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002503
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002504 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002505 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002506
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002507 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002508 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002509 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002510 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002511 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002512 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002513 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002514
David Herrmanndc946bd2012-01-07 15:47:24 +01002515 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002516
2517 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518}
2519EXPORT_SYMBOL(hci_unregister_dev);
2520
2521/* Suspend HCI device */
2522int hci_suspend_dev(struct hci_dev *hdev)
2523{
2524 hci_notify(hdev, HCI_DEV_SUSPEND);
2525 return 0;
2526}
2527EXPORT_SYMBOL(hci_suspend_dev);
2528
2529/* Resume HCI device */
2530int hci_resume_dev(struct hci_dev *hdev)
2531{
2532 hci_notify(hdev, HCI_DEV_RESUME);
2533 return 0;
2534}
2535EXPORT_SYMBOL(hci_resume_dev);
2536
Marcel Holtmann76bca882009-11-18 00:40:39 +01002537/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002538int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002539{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002540 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002541 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002542 kfree_skb(skb);
2543 return -ENXIO;
2544 }
2545
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002546 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002547 bt_cb(skb)->incoming = 1;
2548
2549 /* Time stamp */
2550 __net_timestamp(skb);
2551
Marcel Holtmann76bca882009-11-18 00:40:39 +01002552 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002553 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002554
Marcel Holtmann76bca882009-11-18 00:40:39 +01002555 return 0;
2556}
2557EXPORT_SYMBOL(hci_recv_frame);
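/* Illustrative sketch (not part of this file): a driver that already
 * has a complete frame (say, from a USB interrupt URB) queues it to
 * the core like this; my_dev is a hypothetical driver private struct:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(my_dev->hdev, skb);
 */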
2558
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302559static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002560 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302561{
2562 int len = 0;
2563 int hlen = 0;
2564 int remain = count;
2565 struct sk_buff *skb;
2566 struct bt_skb_cb *scb;
2567
2568 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002569 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302570 return -EILSEQ;
2571
2572 skb = hdev->reassembly[index];
2573
2574 if (!skb) {
2575 switch (type) {
2576 case HCI_ACLDATA_PKT:
2577 len = HCI_MAX_FRAME_SIZE;
2578 hlen = HCI_ACL_HDR_SIZE;
2579 break;
2580 case HCI_EVENT_PKT:
2581 len = HCI_MAX_EVENT_SIZE;
2582 hlen = HCI_EVENT_HDR_SIZE;
2583 break;
2584 case HCI_SCODATA_PKT:
2585 len = HCI_MAX_SCO_SIZE;
2586 hlen = HCI_SCO_HDR_SIZE;
2587 break;
2588 }
2589
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002590 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302591 if (!skb)
2592 return -ENOMEM;
2593
2594 scb = (void *) skb->cb;
2595 scb->expect = hlen;
2596 scb->pkt_type = type;
2597
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302598 hdev->reassembly[index] = skb;
2599 }
2600
2601 while (count) {
2602 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002603 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302604
2605 memcpy(skb_put(skb, len), data, len);
2606
2607 count -= len;
2608 data += len;
2609 scb->expect -= len;
2610 remain = count;
2611
2612 switch (type) {
2613 case HCI_EVENT_PKT:
2614 if (skb->len == HCI_EVENT_HDR_SIZE) {
2615 struct hci_event_hdr *h = hci_event_hdr(skb);
2616 scb->expect = h->plen;
2617
2618 if (skb_tailroom(skb) < scb->expect) {
2619 kfree_skb(skb);
2620 hdev->reassembly[index] = NULL;
2621 return -ENOMEM;
2622 }
2623 }
2624 break;
2625
2626 case HCI_ACLDATA_PKT:
2627 if (skb->len == HCI_ACL_HDR_SIZE) {
2628 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2629 scb->expect = __le16_to_cpu(h->dlen);
2630
2631 if (skb_tailroom(skb) < scb->expect) {
2632 kfree_skb(skb);
2633 hdev->reassembly[index] = NULL;
2634 return -ENOMEM;
2635 }
2636 }
2637 break;
2638
2639 case HCI_SCODATA_PKT:
2640 if (skb->len == HCI_SCO_HDR_SIZE) {
2641 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2642 scb->expect = h->dlen;
2643
2644 if (skb_tailroom(skb) < scb->expect) {
2645 kfree_skb(skb);
2646 hdev->reassembly[index] = NULL;
2647 return -ENOMEM;
2648 }
2649 }
2650 break;
2651 }
2652
2653 if (scb->expect == 0) {
2654 /* Complete frame */
2655
2656 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002657 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302658
2659 hdev->reassembly[index] = NULL;
2660 return remain;
2661 }
2662 }
2663
2664 return remain;
2665}
2666
Marcel Holtmannef222012007-07-11 06:42:04 +02002667int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2668{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302669 int rem = 0;
2670
Marcel Holtmannef222012007-07-11 06:42:04 +02002671 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2672 return -EILSEQ;
2673
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002674 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002675 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302676 if (rem < 0)
2677 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002678
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302679 data += (count - rem);
2680 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002681 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002682
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302683 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002684}
2685EXPORT_SYMBOL(hci_recv_fragment);
2686
Suraj Sumangala99811512010-07-14 13:02:19 +05302687#define STREAM_REASSEMBLY 0
2688
2689int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2690{
2691 int type;
2692 int rem = 0;
2693
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002694 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302695 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2696
2697 if (!skb) {
2698 struct { char type; } *pkt;
2699
2700 /* Start of the frame */
2701 pkt = data;
2702 type = pkt->type;
2703
2704 data++;
2705 count--;
2706 } else
2707 type = bt_cb(skb)->pkt_type;
2708
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002709 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002710 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302711 if (rem < 0)
2712 return rem;
2713
2714 data += (count - rem);
2715 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002716 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302717
2718 return rem;
2719}
2720EXPORT_SYMBOL(hci_recv_stream_fragment);
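/* Illustrative sketch (not part of this file): a UART-style driver
 * with no framing of its own pushes raw bytes straight from its
 * receive callback and lets the reassembly above find the packet
 * type byte that starts each frame; my_uart is hypothetical:
 *
 *	static void my_uart_rx(struct my_uart *u, void *buf, int count)
 *	{
 *		if (hci_recv_stream_fragment(u->hdev, buf, count) < 0)
 *			BT_ERR("Frame reassembly failed");
 *	}
 */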
2721
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722/* ---- Interface to upper protocols ---- */
2723
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724int hci_register_cb(struct hci_cb *cb)
2725{
2726 BT_DBG("%p name %s", cb, cb->name);
2727
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002728 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002730 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731
2732 return 0;
2733}
2734EXPORT_SYMBOL(hci_register_cb);
2735
2736int hci_unregister_cb(struct hci_cb *cb)
2737{
2738 BT_DBG("%p name %s", cb, cb->name);
2739
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002740 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002742 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743
2744 return 0;
2745}
2746EXPORT_SYMBOL(hci_unregister_cb);
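
/* Example (illustrative sketch): an upper protocol registers a struct
 * hci_cb to hear about security events on HCI connections. The field
 * names follow struct hci_cb as declared in hci_core.h for this
 * kernel; the handler body is a stub.
 */
#if 0
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status,
	       encrypt);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

/* Typically called from the protocol's module init/exit: */
static int example_proto_init(void)
{
	return hci_register_cb(&example_cb);
}

static void example_proto_exit(void)
{
	hci_unregister_cb(&example_cb);
}
#endif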
2747
Marcel Holtmann51086992013-10-10 14:54:19 -07002748static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002750 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002752 /* Time stamp */
2753 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002755 /* Send copy to monitor */
2756 hci_send_to_monitor(hdev, skb);
2757
2758 if (atomic_read(&hdev->promisc)) {
2759 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002760 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 }
2762
2763 /* Get rid of skb owner, prior to sending to the driver. */
2764 skb_orphan(skb);
2765
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07002766 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07002767 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768}
2769
Johan Hedberg3119ae92013-03-05 20:37:44 +02002770void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2771{
2772 skb_queue_head_init(&req->cmd_q);
2773 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002774 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002775}
2776
2777int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2778{
2779 struct hci_dev *hdev = req->hdev;
2780 struct sk_buff *skb;
2781 unsigned long flags;
2782
2783 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2784
Andre Guedes5d73e032013-03-08 11:20:16 -03002785 /* If an error occurred during request building, remove all HCI
2786 * commands queued on the HCI request queue.
2787 */
2788 if (req->err) {
2789 skb_queue_purge(&req->cmd_q);
2790 return req->err;
2791 }
2792
Johan Hedberg3119ae92013-03-05 20:37:44 +02002793 /* Do not allow empty requests */
2794 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002795 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002796
2797 skb = skb_peek_tail(&req->cmd_q);
2798 bt_cb(skb)->req.complete = complete;
2799
2800 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2801 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2802 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2803
2804 queue_work(hdev->workqueue, &hdev->cmd_work);
2805
2806 return 0;
2807}
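
/* Example (illustrative sketch): building and running a request. The
 * commands are only staged on req->cmd_q; nothing reaches the
 * controller until hci_req_run() splices them onto hdev->cmd_q, and
 * the completion callback fires once, after the last command in the
 * request completes. The opcode pairing below is just one plausible
 * combination.
 */
#if 0
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_read_info(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
	hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}
#endif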
2808
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002809static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002810 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811{
2812 int len = HCI_COMMAND_HDR_SIZE + plen;
2813 struct hci_command_hdr *hdr;
2814 struct sk_buff *skb;
2815
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002817 if (!skb)
2818 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819
2820 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002821 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822 hdr->plen = plen;
2823
2824 if (plen)
2825 memcpy(skb_put(skb, plen), param, plen);
2826
2827 BT_DBG("skb len %d", skb->len);
2828
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002829 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002830
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002831 return skb;
2832}
2833
2834/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002835int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2836 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002837{
2838 struct sk_buff *skb;
2839
2840 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2841
2842 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2843 if (!skb) {
2844 BT_ERR("%s no memory for command", hdev->name);
2845 return -ENOMEM;
2846 }
2847
Johan Hedberg11714b32013-03-05 20:37:47 +02002848 /* Stand-alone HCI commands must be flagged as
2849 * single-command requests.
2850 */
2851 bt_cb(skb)->req.start = true;
2852
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002854 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855
2856 return 0;
2857}
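
/* Example (illustrative sketch): a stand-alone command outside any
 * request. Write Scan Enable takes a single parameter byte; the
 * opcode and scan constants come from hci.h.
 */
#if 0
static int example_enable_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
#endif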
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858
Johan Hedberg71c76a12013-03-05 20:37:46 +02002859/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002860void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2861 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002862{
2863 struct hci_dev *hdev = req->hdev;
2864 struct sk_buff *skb;
2865
2866 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2867
Andre Guedes34739c12013-03-08 11:20:18 -03002868 /* If an error occurred during request building, there is no point in
2869 * queueing the HCI command. We can simply return.
2870 */
2871 if (req->err)
2872 return;
2873
Johan Hedberg71c76a12013-03-05 20:37:46 +02002874 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2875 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002876 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2877 hdev->name, opcode);
2878 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002879 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002880 }
2881
2882 if (skb_queue_empty(&req->cmd_q))
2883 bt_cb(skb)->req.start = true;
2884
Johan Hedberg02350a72013-04-03 21:50:29 +03002885 bt_cb(skb)->req.event = event;
2886
Johan Hedberg71c76a12013-03-05 20:37:46 +02002887 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002888}
2889
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002890void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2891 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002892{
2893 hci_req_add_ev(req, opcode, plen, param, 0);
2894}
2895
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002897void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898{
2899 struct hci_command_hdr *hdr;
2900
2901 if (!hdev->sent_cmd)
2902 return NULL;
2903
2904 hdr = (void *) hdev->sent_cmd->data;
2905
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002906 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907 return NULL;
2908
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002909 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910
2911 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2912}
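
/* Example (illustrative sketch): a Command Complete handler in the
 * style of hci_event.c can recover the parameters it originally sent,
 * since many events do not echo the command parameters back.
 */
#if 0
static void example_cc_write_scan_enable(struct hci_dev *hdev, u8 status)
{
	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!sent)
		return;

	BT_DBG("%s status 0x%2.2x scan 0x%2.2x", hdev->name, status, *sent);
}
#endif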
2913
2914/* Send ACL data */
2915static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2916{
2917 struct hci_acl_hdr *hdr;
2918 int len = skb->len;
2919
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002920 skb_push(skb, HCI_ACL_HDR_SIZE);
2921 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002922 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002923 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2924 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925}
2926
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002927static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002928 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002930 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 struct hci_dev *hdev = conn->hdev;
2932 struct sk_buff *list;
2933
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002934 skb->len = skb_headlen(skb);
2935 skb->data_len = 0;
2936
2937 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002938
2939 switch (hdev->dev_type) {
2940 case HCI_BREDR:
2941 hci_add_acl_hdr(skb, conn->handle, flags);
2942 break;
2943 case HCI_AMP:
2944 hci_add_acl_hdr(skb, chan->handle, flags);
2945 break;
2946 default:
2947 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2948 return;
2949 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002950
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002951 list = skb_shinfo(skb)->frag_list;
2952 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 /* Non-fragmented */
2954 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2955
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002956 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 } else {
2958 /* Fragmented */
2959 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2960
2961 skb_shinfo(skb)->frag_list = NULL;
2962
2963 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002964 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002966 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002967
2968 flags &= ~ACL_START;
2969 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 do {
2971 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002972
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002973 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002974 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975
2976 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2977
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002978 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 } while (list);
2980
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002981 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002983}
2984
2985void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2986{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002987 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002988
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002989 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002990
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002991 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002993 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995
2996/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002997void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998{
2999 struct hci_dev *hdev = conn->hdev;
3000 struct hci_sco_hdr hdr;
3001
3002 BT_DBG("%s len %d", hdev->name, skb->len);
3003
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003004 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005 hdr.dlen = skb->len;
3006
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003007 skb_push(skb, HCI_SCO_HDR_SIZE);
3008 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003009 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003011 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003012
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003014 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016
3017/* ---- HCI TX task (outgoing data) ---- */
3018
3019/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003020static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3021 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022{
3023 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003024 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003025 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003027 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003029
3030 rcu_read_lock();
3031
3032 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003033 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003035
3036 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3037 continue;
3038
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 num++;
3040
3041 if (c->sent < min) {
3042 min = c->sent;
3043 conn = c;
3044 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003045
3046 if (hci_conn_num(hdev, type) == num)
3047 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 }
3049
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003050 rcu_read_unlock();
3051
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003053 int cnt, q;
3054
3055 switch (conn->type) {
3056 case ACL_LINK:
3057 cnt = hdev->acl_cnt;
3058 break;
3059 case SCO_LINK:
3060 case ESCO_LINK:
3061 cnt = hdev->sco_cnt;
3062 break;
3063 case LE_LINK:
3064 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3065 break;
3066 default:
3067 cnt = 0;
3068 BT_ERR("Unknown link type");
3069 }
3070
3071 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072 *quote = q ? q : 1;
3073 } else
3074 *quote = 0;
3075
3076 BT_DBG("conn %p quote %d", conn, *quote);
3077 return conn;
3078}
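
/* Worked example (not in the original source): with hdev->acl_cnt == 9
 * and three ACL connections holding queued data, num == 3 and the
 * connection with the smallest c->sent is picked, with a quote of
 * 9 / 3 == 3 packets. A zero quotient is rounded up to 1 so that a
 * nearly saturated controller still makes progress one packet at a
 * time.
 */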
3079
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003080static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081{
3082 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003083 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084
Ville Tervobae1f5d92011-02-10 22:38:53 -03003085 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003087 rcu_read_lock();
3088
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003090 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003091 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003092 BT_ERR("%s killing stalled connection %pMR",
3093 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003094 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 }
3096 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003097
3098 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099}
3100
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003101static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3102 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003103{
3104 struct hci_conn_hash *h = &hdev->conn_hash;
3105 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003106 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003107 struct hci_conn *conn;
3108 int cnt, q, conn_num = 0;
3109
3110 BT_DBG("%s", hdev->name);
3111
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003112 rcu_read_lock();
3113
3114 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003115 struct hci_chan *tmp;
3116
3117 if (conn->type != type)
3118 continue;
3119
3120 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3121 continue;
3122
3123 conn_num++;
3124
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003125 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003126 struct sk_buff *skb;
3127
3128 if (skb_queue_empty(&tmp->data_q))
3129 continue;
3130
3131 skb = skb_peek(&tmp->data_q);
3132 if (skb->priority < cur_prio)
3133 continue;
3134
3135 if (skb->priority > cur_prio) {
3136 num = 0;
3137 min = ~0;
3138 cur_prio = skb->priority;
3139 }
3140
3141 num++;
3142
3143 if (conn->sent < min) {
3144 min = conn->sent;
3145 chan = tmp;
3146 }
3147 }
3148
3149 if (hci_conn_num(hdev, type) == conn_num)
3150 break;
3151 }
3152
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003153 rcu_read_unlock();
3154
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003155 if (!chan)
3156 return NULL;
3157
3158 switch (chan->conn->type) {
3159 case ACL_LINK:
3160 cnt = hdev->acl_cnt;
3161 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003162 case AMP_LINK:
3163 cnt = hdev->block_cnt;
3164 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003165 case SCO_LINK:
3166 case ESCO_LINK:
3167 cnt = hdev->sco_cnt;
3168 break;
3169 case LE_LINK:
3170 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3171 break;
3172 default:
3173 cnt = 0;
3174 BT_ERR("Unknown link type");
3175 }
3176
3177 q = cnt / num;
3178 *quote = q ? q : 1;
3179 BT_DBG("chan %p quote %d", chan, *quote);
3180 return chan;
3181}
3182
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003183static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3184{
3185 struct hci_conn_hash *h = &hdev->conn_hash;
3186 struct hci_conn *conn;
3187 int num = 0;
3188
3189 BT_DBG("%s", hdev->name);
3190
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003191 rcu_read_lock();
3192
3193 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003194 struct hci_chan *chan;
3195
3196 if (conn->type != type)
3197 continue;
3198
3199 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3200 continue;
3201
3202 num++;
3203
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003204 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003205 struct sk_buff *skb;
3206
3207 if (chan->sent) {
3208 chan->sent = 0;
3209 continue;
3210 }
3211
3212 if (skb_queue_empty(&chan->data_q))
3213 continue;
3214
3215 skb = skb_peek(&chan->data_q);
3216 if (skb->priority >= HCI_PRIO_MAX - 1)
3217 continue;
3218
3219 skb->priority = HCI_PRIO_MAX - 1;
3220
3221 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003222 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003223 }
3224
3225 if (hci_conn_num(hdev, type) == num)
3226 break;
3227 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003228
3229 rcu_read_unlock();
3230
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003231}
3232
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003233static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3234{
3235 /* Calculate count of blocks used by this packet */
3236 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3237}
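
/* Worked example (not in the original source): assuming a block-based
 * controller reporting hdev->block_len == 339, an outgoing skb of 1024
 * bytes carries 1024 - HCI_ACL_HDR_SIZE == 1020 bytes beyond the ACL
 * header, and DIV_ROUND_UP(1020, 339) == 4, so sending it consumes
 * four controller data blocks.
 */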
3238
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003239static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 if (!test_bit(HCI_RAW, &hdev->flags)) {
3242 /* ACL tx timeout must be longer than the maximum
3243 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003244 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003245 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003246 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003248}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003250static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003251{
3252 unsigned int cnt = hdev->acl_cnt;
3253 struct hci_chan *chan;
3254 struct sk_buff *skb;
3255 int quote;
3256
3257 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003258
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003259 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003260 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003261 u32 priority = (skb_peek(&chan->data_q))->priority;
3262 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003263 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003264 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003265
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003266 /* Stop if priority has changed */
3267 if (skb->priority < priority)
3268 break;
3269
3270 skb = skb_dequeue(&chan->data_q);
3271
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003272 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003273 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003274
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003275 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276 hdev->acl_last_tx = jiffies;
3277
3278 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003279 chan->sent++;
3280 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281 }
3282 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003283
3284 if (cnt != hdev->acl_cnt)
3285 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286}
3287
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003288static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003289{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003290 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003291 struct hci_chan *chan;
3292 struct sk_buff *skb;
3293 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003294 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003295
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003296 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003297
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003298 BT_DBG("%s", hdev->name);
3299
3300 if (hdev->dev_type == HCI_AMP)
3301 type = AMP_LINK;
3302 else
3303 type = ACL_LINK;
3304
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003305 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003306 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003307 u32 priority = (skb_peek(&chan->data_q))->priority;
3308 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3309 int blocks;
3310
3311 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003312 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003313
3314 /* Stop if priority has changed */
3315 if (skb->priority < priority)
3316 break;
3317
3318 skb = skb_dequeue(&chan->data_q);
3319
3320 blocks = __get_blocks(hdev, skb);
3321 if (blocks > hdev->block_cnt)
3322 return;
3323
3324 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003325 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003326
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003327 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003328 hdev->acl_last_tx = jiffies;
3329
3330 hdev->block_cnt -= blocks;
3331 quote -= blocks;
3332
3333 chan->sent += blocks;
3334 chan->conn->sent += blocks;
3335 }
3336 }
3337
3338 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003339 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003340}
3341
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003342static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003343{
3344 BT_DBG("%s", hdev->name);
3345
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003346 /* No ACL link over BR/EDR controller */
3347 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3348 return;
3349
3350 /* No AMP link over AMP controller */
3351 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003352 return;
3353
3354 switch (hdev->flow_ctl_mode) {
3355 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3356 hci_sched_acl_pkt(hdev);
3357 break;
3358
3359 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3360 hci_sched_acl_blk(hdev);
3361 break;
3362 }
3363}
3364
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003366static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367{
3368 struct hci_conn *conn;
3369 struct sk_buff *skb;
3370 int quote;
3371
3372 BT_DBG("%s", hdev->name);
3373
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003374 if (!hci_conn_num(hdev, SCO_LINK))
3375 return;
3376
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3378 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3379 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003380 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381
3382 conn->sent++;
3383 if (conn->sent == ~0)
3384 conn->sent = 0;
3385 }
3386 }
3387}
3388
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003389static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003390{
3391 struct hci_conn *conn;
3392 struct sk_buff *skb;
3393 int quote;
3394
3395 BT_DBG("%s", hdev->name);
3396
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003397 if (!hci_conn_num(hdev, ESCO_LINK))
3398 return;
3399
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003400 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3401 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003402 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3403 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003404 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003405
3406 conn->sent++;
3407 if (conn->sent == ~0)
3408 conn->sent = 0;
3409 }
3410 }
3411}
3412
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003413static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003414{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003415 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003416 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003417 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003418
3419 BT_DBG("%s", hdev->name);
3420
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003421 if (!hci_conn_num(hdev, LE_LINK))
3422 return;
3423
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003424 if (!test_bit(HCI_RAW, &hdev->flags)) {
3425 /* LE tx timeout must be longer than the maximum
3426 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003427 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003428 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003429 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003430 }
3431
3432 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003433 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003434 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003435 u32 priority = (skb_peek(&chan->data_q))->priority;
3436 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003437 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003438 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003439
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003440 /* Stop if priority has changed */
3441 if (skb->priority < priority)
3442 break;
3443
3444 skb = skb_dequeue(&chan->data_q);
3445
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003446 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003447 hdev->le_last_tx = jiffies;
3448
3449 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003450 chan->sent++;
3451 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003452 }
3453 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003454
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003455 if (hdev->le_pkts)
3456 hdev->le_cnt = cnt;
3457 else
3458 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003459
3460 if (cnt != tmp)
3461 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003462}
3463
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003464static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003466 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467 struct sk_buff *skb;
3468
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003469 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003470 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471
Marcel Holtmann52de5992013-09-03 18:08:38 -07003472 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3473 /* Schedule queues and send stuff to HCI driver */
3474 hci_sched_acl(hdev);
3475 hci_sched_sco(hdev);
3476 hci_sched_esco(hdev);
3477 hci_sched_le(hdev);
3478 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003479
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480 /* Send next queued raw (unknown type) packet */
3481 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003482 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483}
3484
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003485/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486
3487/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003488static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489{
3490 struct hci_acl_hdr *hdr = (void *) skb->data;
3491 struct hci_conn *conn;
3492 __u16 handle, flags;
3493
3494 skb_pull(skb, HCI_ACL_HDR_SIZE);
3495
3496 handle = __le16_to_cpu(hdr->handle);
3497 flags = hci_flags(handle);
3498 handle = hci_handle(handle);
3499
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003500 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003501 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502
3503 hdev->stat.acl_rx++;
3504
3505 hci_dev_lock(hdev);
3506 conn = hci_conn_hash_lookup_handle(hdev, handle);
3507 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003508
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003510 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003511
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003513 l2cap_recv_acldata(conn, skb, flags);
3514 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003516 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003517 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003518 }
3519
3520 kfree_skb(skb);
3521}
3522
3523/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003524static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525{
3526 struct hci_sco_hdr *hdr = (void *) skb->data;
3527 struct hci_conn *conn;
3528 __u16 handle;
3529
3530 skb_pull(skb, HCI_SCO_HDR_SIZE);
3531
3532 handle = __le16_to_cpu(hdr->handle);
3533
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003534 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535
3536 hdev->stat.sco_rx++;
3537
3538 hci_dev_lock(hdev);
3539 conn = hci_conn_hash_lookup_handle(hdev, handle);
3540 hci_dev_unlock(hdev);
3541
3542 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003544 sco_recv_scodata(conn, skb);
3545 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003547 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003548 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549 }
3550
3551 kfree_skb(skb);
3552}
3553
Johan Hedberg9238f362013-03-05 20:37:48 +02003554static bool hci_req_is_complete(struct hci_dev *hdev)
3555{
3556 struct sk_buff *skb;
3557
3558 skb = skb_peek(&hdev->cmd_q);
3559 if (!skb)
3560 return true;
3561
3562 return bt_cb(skb)->req.start;
3563}
3564
Johan Hedberg42c6b122013-03-05 20:37:49 +02003565static void hci_resend_last(struct hci_dev *hdev)
3566{
3567 struct hci_command_hdr *sent;
3568 struct sk_buff *skb;
3569 u16 opcode;
3570
3571 if (!hdev->sent_cmd)
3572 return;
3573
3574 sent = (void *) hdev->sent_cmd->data;
3575 opcode = __le16_to_cpu(sent->opcode);
3576 if (opcode == HCI_OP_RESET)
3577 return;
3578
3579 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3580 if (!skb)
3581 return;
3582
3583 skb_queue_head(&hdev->cmd_q, skb);
3584 queue_work(hdev->workqueue, &hdev->cmd_work);
3585}
3586
Johan Hedberg9238f362013-03-05 20:37:48 +02003587void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3588{
3589 hci_req_complete_t req_complete = NULL;
3590 struct sk_buff *skb;
3591 unsigned long flags;
3592
3593 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3594
Johan Hedberg42c6b122013-03-05 20:37:49 +02003595 /* If the completed command doesn't match the last one that was
3596 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003597 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003598 if (!hci_sent_cmd_data(hdev, opcode)) {
3599 /* Some CSR based controllers generate a spontaneous
3600 * reset complete event during init and any pending
3601 * command will never be completed. In such a case we
3602 * need to resend whatever was the last sent
3603 * command.
3604 */
3605 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3606 hci_resend_last(hdev);
3607
Johan Hedberg9238f362013-03-05 20:37:48 +02003608 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003609 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003610
3611 /* If the command succeeded and there's still more commands in
3612 * this request the request is not yet complete.
3613 */
3614 if (!status && !hci_req_is_complete(hdev))
3615 return;
3616
3617 /* If this was the last command in a request the complete
3618 * callback would be found in hdev->sent_cmd instead of the
3619 * command queue (hdev->cmd_q).
3620 */
3621 if (hdev->sent_cmd) {
3622 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003623
3624 if (req_complete) {
3625 /* We must set the complete callback to NULL to
3626 * avoid calling the callback more than once if
3627 * this function gets called again.
3628 */
3629 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3630
Johan Hedberg9238f362013-03-05 20:37:48 +02003631 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003632 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003633 }
3634
3635 /* Remove all pending commands belonging to this request */
3636 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3637 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3638 if (bt_cb(skb)->req.start) {
3639 __skb_queue_head(&hdev->cmd_q, skb);
3640 break;
3641 }
3642
3643 req_complete = bt_cb(skb)->req.complete;
3644 kfree_skb(skb);
3645 }
3646 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3647
3648call_complete:
3649 if (req_complete)
3650 req_complete(hdev, status);
3651}
3652
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003653static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003655 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656 struct sk_buff *skb;
3657
3658 BT_DBG("%s", hdev->name);
3659
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003661 /* Send copy to monitor */
3662 hci_send_to_monitor(hdev, skb);
3663
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 if (atomic_read(&hdev->promisc)) {
3665 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003666 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667 }
3668
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003669 if (test_bit(HCI_RAW, &hdev->flags) ||
3670 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671 kfree_skb(skb);
3672 continue;
3673 }
3674
3675 if (test_bit(HCI_INIT, &hdev->flags)) {
3676 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003677 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678 case HCI_ACLDATA_PKT:
3679 case HCI_SCODATA_PKT:
3680 kfree_skb(skb);
3681 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003682 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683 }
3684
3685 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003686 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003688 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689 hci_event_packet(hdev, skb);
3690 break;
3691
3692 case HCI_ACLDATA_PKT:
3693 BT_DBG("%s ACL data packet", hdev->name);
3694 hci_acldata_packet(hdev, skb);
3695 break;
3696
3697 case HCI_SCODATA_PKT:
3698 BT_DBG("%s SCO data packet", hdev->name);
3699 hci_scodata_packet(hdev, skb);
3700 break;
3701
3702 default:
3703 kfree_skb(skb);
3704 break;
3705 }
3706 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707}
3708
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003709static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003711 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712 struct sk_buff *skb;
3713
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003714 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3715 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003718 if (atomic_read(&hdev->cmd_cnt)) {
3719 skb = skb_dequeue(&hdev->cmd_q);
3720 if (!skb)
3721 return;
3722
Wei Yongjun7585b972009-02-25 18:29:52 +08003723 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003725 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003726 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003728 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003729 if (test_bit(HCI_RESET, &hdev->flags))
3730 del_timer(&hdev->cmd_timer);
3731 else
3732 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003733 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734 } else {
3735 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003736 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737 }
3738 }
3739}