/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

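/* Dump the list of blacklisted remote addresses, one bdaddr per line. */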
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR\n", &b->bdaddr);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

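/* Completion callback shared by all synchronous requests: record the
 * result and wake up whoever is sleeping on req_wait_q.
 */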
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

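/* Take the event that completed the last synchronous request out of
 * hdev->recv_evt and return it if it matches the expected event code,
 * or the Command Complete for @opcode when @event is zero.
 */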
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

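/* Send a single HCI command and sleep until the matching event (or the
 * Command Complete for @opcode) arrives or @timeout expires. Returns
 * the event skb on success and an ERR_PTR otherwise.
 */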
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

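/* As __hci_req_sync(), but fails early with -ENETDOWN if the device is
 * not up and takes the request lock around the execution.
 */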
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

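/* Stage one init for BR/EDR controllers: query the basic controller
 * information needed before anything else can be configured.
 */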
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

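/* Stage one init for AMP controllers, which use block-based flow
 * control and have no BD address to read.
 */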
Johan Hedberg42c6b122013-03-05 20:37:49 +0200428static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200429{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200430 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200431
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200432 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200433 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300434
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700435 /* Read Local Supported Commands */
436 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
437
438 /* Read Local Supported Features */
439 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
440
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300441 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200442 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300443
444 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200445 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700446
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700447 /* Read Flow Control Mode */
448 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
449
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700450 /* Read Location Data */
451 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200452}
453
Johan Hedberg42c6b122013-03-05 20:37:49 +0200454static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200455{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200456 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200457
458 BT_DBG("%s %ld", hdev->name, opt);
459
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300460 /* Reset */
461 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200462 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300463
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200464 switch (hdev->dev_type) {
465 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200466 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200467 break;
468
469 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200470 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200471 break;
472
473 default:
474 BT_ERR("Unknown device type %d", hdev->dev_type);
475 break;
476 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200477}
478
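/* Stage two init for BR/EDR capable controllers: read the remaining
 * controller state and set up event filtering and timeouts.
 */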
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

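/* Pick the richest inquiry mode the controller supports: 0x02 for
 * inquiry with extended result, 0x01 for inquiry with RSSI, 0x00 for
 * standard inquiry. The manufacturer/revision checks appear to cover
 * controllers known to handle RSSI results without advertising the
 * feature.
 */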
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

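/* Run the staged init sequence. Stages two to four only apply to
 * BR/EDR/LE controllers; the debugfs entries are created once, during
 * the initial setup phase.
 */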
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

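/* True while a discovery procedure is actively finding devices or
 * resolving remote names.
 */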
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

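/* Add a fresh inquiry result to the cache, or refresh an existing
 * entry. Returns true if the remote name is already known, i.e. no
 * remote name request is needed for this device.
 */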
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

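/* ioctl(HCIINQUIRY) handler: run a new inquiry if the cache is stale
 * (or a flush was requested) and copy the cached results to user space.
 */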
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

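/* Bring the device up: power on the transport, run the vendor setup
 * and the HCI init sequence, and notify the stack. Cleans up again if
 * the init stage fails.
 */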
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001281static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 int ret = 0;
1284
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 BT_DBG("%s %p", hdev->name, hdev);
1286
1287 hci_req_lock(hdev);
1288
Johan Hovold94324962012-03-15 14:48:41 +01001289 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1290 ret = -ENODEV;
1291 goto done;
1292 }
1293
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001294 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1295 /* Check for rfkill but allow the HCI setup stage to
1296 * proceed (which in itself doesn't cause any RF activity).
1297 */
1298 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1299 ret = -ERFKILL;
1300 goto done;
1301 }
1302
1303 /* Check for valid public address or a configured static
1304 * random adddress, but let the HCI setup proceed to
1305 * be able to determine if there is a public address
1306 * or not.
1307 *
1308 * This check is only valid for BR/EDR controllers
1309 * since AMP controllers do not have an address.
1310 */
1311 if (hdev->dev_type == HCI_BREDR &&
1312 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1313 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1314 ret = -EADDRNOTAVAIL;
1315 goto done;
1316 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001317 }
1318
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 if (test_bit(HCI_UP, &hdev->flags)) {
1320 ret = -EALREADY;
1321 goto done;
1322 }
1323
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 if (hdev->open(hdev)) {
1325 ret = -EIO;
1326 goto done;
1327 }
1328
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001329 atomic_set(&hdev->cmd_cnt, 1);
1330 set_bit(HCI_INIT, &hdev->flags);
1331
1332 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1333 ret = hdev->setup(hdev);
1334
1335 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001336 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1337 set_bit(HCI_RAW, &hdev->flags);
1338
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001339 if (!test_bit(HCI_RAW, &hdev->flags) &&
1340 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001341 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 }
1343
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001344 clear_bit(HCI_INIT, &hdev->flags);
1345
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 if (!ret) {
1347 hci_dev_hold(hdev);
1348 set_bit(HCI_UP, &hdev->flags);
1349 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001350 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001351 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001352 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001353 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001354 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001355 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001356 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001357 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001359 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001360 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001361 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362
1363 skb_queue_purge(&hdev->cmd_q);
1364 skb_queue_purge(&hdev->rx_q);
1365
1366 if (hdev->flush)
1367 hdev->flush(hdev);
1368
1369 if (hdev->sent_cmd) {
1370 kfree_skb(hdev->sent_cmd);
1371 hdev->sent_cmd = NULL;
1372 }
1373
1374 hdev->close(hdev);
1375 hdev->flags = 0;
1376 }
1377
1378done:
1379 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 return ret;
1381}
1382
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001383/* ---- HCI ioctl helpers ---- */
1384
1385int hci_dev_open(__u16 dev)
1386{
1387 struct hci_dev *hdev;
1388 int err;
1389
1390 hdev = hci_dev_get(dev);
1391 if (!hdev)
1392 return -ENODEV;
1393
Johan Hedberge1d08f42013-10-01 22:44:50 +03001394 /* We need to ensure that no other power on/off work is pending
1395 * before proceeding to call hci_dev_do_open. This is
1396 * particularly important if the setup procedure has not yet
1397 * completed.
1398 */
1399 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1400 cancel_delayed_work(&hdev->power_off);
1401
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001402 /* After this call it is guaranteed that the setup procedure
1403 * has finished. This means that error conditions like RFKILL
1404 * or no valid public or static random address apply.
1405 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001406 flush_workqueue(hdev->req_workqueue);
1407
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001408 err = hci_dev_do_open(hdev);
1409
1410 hci_dev_put(hdev);
1411
1412 return err;
1413}
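/* For context, a hedged user-space sketch of how this path is normally
 * reached: the HCIDEVUP ioctl on a raw HCI socket ends up in
 * hci_dev_open(). Constants and types come from the BlueZ
 * <bluetooth/hci.h> headers; error handling is kept minimal.
 */
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int bring_up_adapter(int dev_id)
{
	int err = 0;
	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (dd < 0)
		return -errno;

	/* EALREADY just means the adapter was already up */
	if (ioctl(dd, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
		err = -errno;

	close(dd);
	return err;
}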
1414
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415static int hci_dev_do_close(struct hci_dev *hdev)
1416{
1417 BT_DBG("%s %p", hdev->name, hdev);
1418
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001419 cancel_delayed_work(&hdev->power_off);
1420
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 hci_req_cancel(hdev, ENODEV);
1422 hci_req_lock(hdev);
1423
1424 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001425 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 hci_req_unlock(hdev);
1427 return 0;
1428 }
1429
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001430 /* Flush RX and TX works */
1431 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001432 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001434 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001435 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001436 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001437 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07001438 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001439 }
1440
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001441 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001442 cancel_delayed_work(&hdev->service_cache);
1443
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001444 cancel_delayed_work_sync(&hdev->le_scan_disable);
1445
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001446 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001447 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001449 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450
1451 hci_notify(hdev, HCI_DEV_DOWN);
1452
1453 if (hdev->flush)
1454 hdev->flush(hdev);
1455
1456 /* Reset device */
1457 skb_queue_purge(&hdev->cmd_q);
1458 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001459 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07001460 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001461 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001463 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 clear_bit(HCI_INIT, &hdev->flags);
1465 }
1466
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001467 /* flush cmd work */
1468 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469
1470 /* Drop queues */
1471 skb_queue_purge(&hdev->rx_q);
1472 skb_queue_purge(&hdev->cmd_q);
1473 skb_queue_purge(&hdev->raw_q);
1474
1475 /* Drop last sent command */
1476 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001477 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 kfree_skb(hdev->sent_cmd);
1479 hdev->sent_cmd = NULL;
1480 }
1481
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001482 kfree_skb(hdev->recv_evt);
1483 hdev->recv_evt = NULL;
1484
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 /* After this point our queues are empty
1486 * and no tasks are scheduled. */
1487 hdev->close(hdev);
1488
Johan Hedberg35b973c2013-03-15 17:06:59 -05001489 /* Clear flags */
1490 hdev->flags = 0;
1491 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1492
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001493 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1494 if (hdev->dev_type == HCI_BREDR) {
1495 hci_dev_lock(hdev);
1496 mgmt_powered(hdev, 0);
1497 hci_dev_unlock(hdev);
1498 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001499 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001500
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001501 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001502 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001503
Johan Hedberge59fda82012-02-22 18:11:53 +02001504 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001505 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001506
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 hci_req_unlock(hdev);
1508
1509 hci_dev_put(hdev);
1510 return 0;
1511}
1512
1513int hci_dev_close(__u16 dev)
1514{
1515 struct hci_dev *hdev;
1516 int err;
1517
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001518 hdev = hci_dev_get(dev);
1519 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001521
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001522 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1523 err = -EBUSY;
1524 goto done;
1525 }
1526
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001527 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1528 cancel_delayed_work(&hdev->power_off);
1529
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001531
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001532done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 hci_dev_put(hdev);
1534 return err;
1535}
1536
1537int hci_dev_reset(__u16 dev)
1538{
1539 struct hci_dev *hdev;
1540 int ret = 0;
1541
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001542 hdev = hci_dev_get(dev);
1543 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 return -ENODEV;
1545
1546 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547
Marcel Holtmann808a0492013-08-26 20:57:58 -07001548 if (!test_bit(HCI_UP, &hdev->flags)) {
1549 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001553 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1554 ret = -EBUSY;
1555 goto done;
1556 }
1557
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 /* Drop queues */
1559 skb_queue_purge(&hdev->rx_q);
1560 skb_queue_purge(&hdev->cmd_q);
1561
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001562 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001563 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001565 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566
1567 if (hdev->flush)
1568 hdev->flush(hdev);
1569
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001570 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001571 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572
1573 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001574 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575
1576done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 hci_req_unlock(hdev);
1578 hci_dev_put(hdev);
1579 return ret;
1580}
1581
1582int hci_dev_reset_stat(__u16 dev)
1583{
1584 struct hci_dev *hdev;
1585 int ret = 0;
1586
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001587 hdev = hci_dev_get(dev);
1588 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 return -ENODEV;
1590
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001591 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1592 ret = -EBUSY;
1593 goto done;
1594 }
1595
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1597
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001598done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 return ret;
1601}
1602
1603int hci_dev_cmd(unsigned int cmd, void __user *arg)
1604{
1605 struct hci_dev *hdev;
1606 struct hci_dev_req dr;
1607 int err = 0;
1608
1609 if (copy_from_user(&dr, arg, sizeof(dr)))
1610 return -EFAULT;
1611
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001612 hdev = hci_dev_get(dr.dev_id);
1613 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 return -ENODEV;
1615
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001616 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1617 err = -EBUSY;
1618 goto done;
1619 }
1620
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001621 if (hdev->dev_type != HCI_BREDR) {
1622 err = -EOPNOTSUPP;
1623 goto done;
1624 }
1625
Johan Hedberg56f87902013-10-02 13:43:13 +03001626 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1627 err = -EOPNOTSUPP;
1628 goto done;
1629 }
1630
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 switch (cmd) {
1632 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001633 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1634 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 break;
1636
1637 case HCISETENCRYPT:
1638 if (!lmp_encrypt_capable(hdev)) {
1639 err = -EOPNOTSUPP;
1640 break;
1641 }
1642
1643 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1644 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001645 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1646 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 if (err)
1648 break;
1649 }
1650
Johan Hedberg01178cd2013-03-05 20:37:41 +02001651 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1652 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 break;
1654
1655 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001656 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1657 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 break;
1659
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001660 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001661 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1662 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001663 break;
1664
1665 case HCISETLINKMODE:
1666 hdev->link_mode = ((__u16) dr.dev_opt) &
1667 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1668 break;
1669
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 case HCISETPTYPE:
1671 hdev->pkt_type = (__u16) dr.dev_opt;
1672 break;
1673
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001675 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1676 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 break;
1678
1679 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001680 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1681 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 break;
1683
1684 default:
1685 err = -EINVAL;
1686 break;
1687 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001688
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001689done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 hci_dev_put(hdev);
1691 return err;
1692}
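/* A matching hedged sketch for one of the ioctls handled above:
 * HCISETSCAN with SCAN_PAGE | SCAN_INQUIRY makes a BR/EDR adapter
 * connectable and discoverable (hciconfig "piscan"). "dd" is assumed to
 * be a raw HCI socket, opened as in the sketch after hci_dev_open().
 */
static int make_discoverable(int dd, int dev_id)
{
	struct hci_dev_req dr;

	dr.dev_id = dev_id;
	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;

	if (ioctl(dd, HCISETSCAN, (unsigned long) &dr) < 0)
		return -errno;

	return 0;
}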
1693
1694int hci_get_dev_list(void __user *arg)
1695{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001696 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 struct hci_dev_list_req *dl;
1698 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 int n = 0, size, err;
1700 __u16 dev_num;
1701
1702 if (get_user(dev_num, (__u16 __user *) arg))
1703 return -EFAULT;
1704
1705 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1706 return -EINVAL;
1707
1708 size = sizeof(*dl) + dev_num * sizeof(*dr);
1709
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001710 dl = kzalloc(size, GFP_KERNEL);
1711 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 return -ENOMEM;
1713
1714 dr = dl->dev_req;
1715
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001716 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001717 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001718 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001719 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001720
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001721 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1722 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001723
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 (dr + n)->dev_id = hdev->id;
1725 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001726
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 if (++n >= dev_num)
1728 break;
1729 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001730 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
1732 dl->dev_num = n;
1733 size = sizeof(*dl) + n * sizeof(*dr);
1734
1735 err = copy_to_user(arg, dl, size);
1736 kfree(dl);
1737
1738 return err ? -EFAULT : 0;
1739}
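/* Hedged user-space counterpart of hci_get_dev_list(): HCIGETDEVLIST
 * fills a caller-sized array of (dev_id, flags) pairs. HCI_MAX_DEV and
 * the request layout come from the BlueZ headers; <stdio.h> and
 * <stdlib.h> are assumed to be included.
 */
static int list_adapters(int dd)
{
	struct hci_dev_list_req *dl;
	int i, err = 0;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (!dl)
		return -ENOMEM;

	dl->dev_num = HCI_MAX_DEV;

	if (ioctl(dd, HCIGETDEVLIST, (void *) dl) < 0) {
		err = -errno;
		goto free;
	}

	for (i = 0; i < dl->dev_num; i++)
		printf("hci%u flags 0x%08x\n", dl->dev_req[i].dev_id,
		       dl->dev_req[i].dev_opt);

free:
	free(dl);
	return err;
}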
1740
1741int hci_get_dev_info(void __user *arg)
1742{
1743 struct hci_dev *hdev;
1744 struct hci_dev_info di;
1745 int err = 0;
1746
1747 if (copy_from_user(&di, arg, sizeof(di)))
1748 return -EFAULT;
1749
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001750 hdev = hci_dev_get(di.dev_id);
1751 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 return -ENODEV;
1753
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001754 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001755 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001756
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001757 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1758 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001759
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 strcpy(di.name, hdev->name);
1761 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001762 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 di.flags = hdev->flags;
1764 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001765 if (lmp_bredr_capable(hdev)) {
1766 di.acl_mtu = hdev->acl_mtu;
1767 di.acl_pkts = hdev->acl_pkts;
1768 di.sco_mtu = hdev->sco_mtu;
1769 di.sco_pkts = hdev->sco_pkts;
1770 } else {
1771 di.acl_mtu = hdev->le_mtu;
1772 di.acl_pkts = hdev->le_pkts;
1773 di.sco_mtu = 0;
1774 di.sco_pkts = 0;
1775 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 di.link_policy = hdev->link_policy;
1777 di.link_mode = hdev->link_mode;
1778
1779 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1780 memcpy(&di.features, &hdev->features, sizeof(di.features));
1781
1782 if (copy_to_user(arg, &di, sizeof(di)))
1783 err = -EFAULT;
1784
1785 hci_dev_put(hdev);
1786
1787 return err;
1788}
1789
1790/* ---- Interface to HCI drivers ---- */
1791
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001792static int hci_rfkill_set_block(void *data, bool blocked)
1793{
1794 struct hci_dev *hdev = data;
1795
1796 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1797
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001798 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1799 return -EBUSY;
1800
Johan Hedberg5e130362013-09-13 08:58:17 +03001801 if (blocked) {
1802 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001803 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1804 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001805 } else {
1806 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001807 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001808
1809 return 0;
1810}
1811
1812static const struct rfkill_ops hci_rfkill_ops = {
1813 .set_block = hci_rfkill_set_block,
1814};
1815
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001816static void hci_power_on(struct work_struct *work)
1817{
1818 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001819 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001820
1821 BT_DBG("%s", hdev->name);
1822
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001823 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001824 if (err < 0) {
1825 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001826 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001827 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001828
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001829 /* During the HCI setup phase, a few error conditions are
1830 * ignored and they need to be checked now. If they are still
1831 * valid, it is important to turn the device back off.
1832 */
1833 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1834 (hdev->dev_type == HCI_BREDR &&
1835 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1836 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03001837 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1838 hci_dev_do_close(hdev);
1839 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02001840 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1841 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03001842 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001843
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001844 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001845 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001846}
1847
1848static void hci_power_off(struct work_struct *work)
1849{
Johan Hedberg32435532011-11-07 22:16:04 +02001850 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001851 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001852
1853 BT_DBG("%s", hdev->name);
1854
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001855 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001856}
1857
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001858static void hci_discov_off(struct work_struct *work)
1859{
1860 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001861
1862 hdev = container_of(work, struct hci_dev, discov_off.work);
1863
1864 BT_DBG("%s", hdev->name);
1865
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07001866 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001867}
1868
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001869int hci_uuids_clear(struct hci_dev *hdev)
1870{
Johan Hedberg48210022013-01-27 00:31:28 +02001871 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001872
Johan Hedberg48210022013-01-27 00:31:28 +02001873 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1874 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001875 kfree(uuid);
1876 }
1877
1878 return 0;
1879}
1880
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001881int hci_link_keys_clear(struct hci_dev *hdev)
1882{
1883 struct list_head *p, *n;
1884
1885 list_for_each_safe(p, n, &hdev->link_keys) {
1886 struct link_key *key;
1887
1888 key = list_entry(p, struct link_key, list);
1889
1890 list_del(p);
1891 kfree(key);
1892 }
1893
1894 return 0;
1895}
1896
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001897int hci_smp_ltks_clear(struct hci_dev *hdev)
1898{
1899 struct smp_ltk *k, *tmp;
1900
1901 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1902 list_del(&k->list);
1903 kfree(k);
1904 }
1905
1906 return 0;
1907}
1908
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001909struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1910{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001911 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001912
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001913 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001914 if (bacmp(bdaddr, &k->bdaddr) == 0)
1915 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001916
1917 return NULL;
1918}
1919
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301920static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001921 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001922{
1923 /* Legacy key */
1924 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301925 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001926
1927 /* Debug keys are insecure so don't store them persistently */
1928 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301929 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001930
1931 /* Changed combination key and there's no previous one */
1932 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301933 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001934
1935 /* Security mode 3 case */
1936 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301937 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001938
1939 /* Neither local nor remote side had no-bonding as requirement */
1940 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301941 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001942
1943 /* Local side had dedicated bonding as requirement */
1944 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301945 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001946
1947 /* Remote side had dedicated bonding as requirement */
1948 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301949 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001950
1951 /* If none of the above criteria match, then don't store the key
1952 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301953 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001954}
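/* Worked example of the rules above (illustrative): an unauthenticated
 * combination key (0x04) is stored when both sides paired with a bonding
 * requirement (auth_type > 0x01). The same key is flushed after use when
 * either side asked for no-bonding (0x00/0x01) and neither side required
 * dedicated bonding (0x02/0x03).
 */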
1955
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001956struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001957{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001958 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001959
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001960 list_for_each_entry(k, &hdev->long_term_keys, list) {
1961 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001962 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001963 continue;
1964
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001965 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001966 }
1967
1968 return NULL;
1969}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001970
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001971struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001972 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001973{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001974 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001975
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001976 list_for_each_entry(k, &hdev->long_term_keys, list)
1977 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001978 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001979 return k;
1980
1981 return NULL;
1982}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001983
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001984int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001985 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001986{
1987 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301988 u8 old_key_type;
1989 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001990
1991 old_key = hci_find_link_key(hdev, bdaddr);
1992 if (old_key) {
1993 old_key_type = old_key->type;
1994 key = old_key;
1995 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001996 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001997 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1998 if (!key)
1999 return -ENOMEM;
2000 list_add(&key->list, &hdev->link_keys);
2001 }
2002
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002003 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002004
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002005 /* Some buggy controller combinations generate a changed
2006 * combination key for legacy pairing even when there's no
2007 * previous key */
2008 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002009 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002010 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002011 if (conn)
2012 conn->key_type = type;
2013 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002014
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002015 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002016 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002017 key->pin_len = pin_len;
2018
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002019 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002020 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002021 else
2022 key->type = type;
2023
Johan Hedberg4df378a2011-04-28 11:29:03 -07002024 if (!new_key)
2025 return 0;
2026
2027 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2028
Johan Hedberg744cf192011-11-08 20:40:14 +02002029 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002030
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302031 if (conn)
2032 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002033
2034 return 0;
2035}
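/* Hedged sketch of the expected caller pattern (compare the link-key
 * notification handling in hci_event.c): look up the connection the key
 * belongs to, then store it as a new key and let hci_persistent_key()
 * decide whether it should survive. The pin_len of 0 is an assumption.
 */
static void example_link_key_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
				    u8 *val, u8 key_type)
{
	struct hci_conn *conn;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
	hci_add_link_key(hdev, conn, 1, bdaddr, val, key_type, 0);

	hci_dev_unlock(hdev);
}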
2036
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002037int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002038 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002039 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002040{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002041 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002042
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002043 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2044 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002045
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002046 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2047 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002048 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002049 else {
2050 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002051 if (!key)
2052 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002053 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002054 }
2055
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002056 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002057 key->bdaddr_type = addr_type;
2058 memcpy(key->val, tk, sizeof(key->val));
2059 key->authenticated = authenticated;
2060 key->ediv = ediv;
2061 key->enc_size = enc_size;
2062 key->type = type;
2063 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002064
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002065 if (!new_key)
2066 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002067
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002068 if (type & HCI_SMP_LTK)
2069 mgmt_new_ltk(hdev, key, 1);
2070
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002071 return 0;
2072}
2073
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002074int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2075{
2076 struct link_key *key;
2077
2078 key = hci_find_link_key(hdev, bdaddr);
2079 if (!key)
2080 return -ENOENT;
2081
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002082 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002083
2084 list_del(&key->list);
2085 kfree(key);
2086
2087 return 0;
2088}
2089
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002090int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2091{
2092 struct smp_ltk *k, *tmp;
2093
2094 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2095 if (bacmp(bdaddr, &k->bdaddr))
2096 continue;
2097
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002098 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002099
2100 list_del(&k->list);
2101 kfree(k);
2102 }
2103
2104 return 0;
2105}
2106
Ville Tervo6bd32322011-02-16 16:32:41 +02002107/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002108static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002109{
2110 struct hci_dev *hdev = (void *) arg;
2111
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002112 if (hdev->sent_cmd) {
2113 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2114 u16 opcode = __le16_to_cpu(sent->opcode);
2115
2116 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2117 } else {
2118 BT_ERR("%s command tx timeout", hdev->name);
2119 }
2120
Ville Tervo6bd32322011-02-16 16:32:41 +02002121 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002122 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002123}
2124
Szymon Janc2763eda2011-03-22 13:12:22 +01002125struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002126 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002127{
2128 struct oob_data *data;
2129
2130 list_for_each_entry(data, &hdev->remote_oob_data, list)
2131 if (bacmp(bdaddr, &data->bdaddr) == 0)
2132 return data;
2133
2134 return NULL;
2135}
2136
2137int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2138{
2139 struct oob_data *data;
2140
2141 data = hci_find_remote_oob_data(hdev, bdaddr);
2142 if (!data)
2143 return -ENOENT;
2144
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002145 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002146
2147 list_del(&data->list);
2148 kfree(data);
2149
2150 return 0;
2151}
2152
2153int hci_remote_oob_data_clear(struct hci_dev *hdev)
2154{
2155 struct oob_data *data, *n;
2156
2157 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2158 list_del(&data->list);
2159 kfree(data);
2160 }
2161
2162 return 0;
2163}
2164
2165int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002166 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002167{
2168 struct oob_data *data;
2169
2170 data = hci_find_remote_oob_data(hdev, bdaddr);
2171
2172 if (!data) {
2173 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2174 if (!data)
2175 return -ENOMEM;
2176
2177 bacpy(&data->bdaddr, bdaddr);
2178 list_add(&data->list, &hdev->remote_oob_data);
2179 }
2180
2181 memcpy(data->hash, hash, sizeof(data->hash));
2182 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2183
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002184 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002185
2186 return 0;
2187}
2188
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002189struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2190 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002191{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002192 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002193
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002194 list_for_each_entry(b, &hdev->blacklist, list) {
2195 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002196 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002197 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002198
2199 return NULL;
2200}
2201
2202int hci_blacklist_clear(struct hci_dev *hdev)
2203{
2204 struct list_head *p, *n;
2205
2206 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002207 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002208
2209 list_del(p);
2210 kfree(b);
2211 }
2212
2213 return 0;
2214}
2215
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002216int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002217{
2218 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002219
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002220 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002221 return -EBADF;
2222
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002223 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002224 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002225
2226 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002227 if (!entry)
2228 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002229
2230 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002231 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002232
2233 list_add(&entry->list, &hdev->blacklist);
2234
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002235 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002236}
2237
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002238int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002239{
2240 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002241
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002242 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002243 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002244
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002245 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002246 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002247 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002248
2249 list_del(&entry->list);
2250 kfree(entry);
2251
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002252 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002253}
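/* Illustrative use of the blacklist helpers above. Callers such as the
 * mgmt block/unblock handlers hold hdev->lock around the update; the
 * BDADDR_BREDR address type is an example value.
 */
static int example_block_peer(struct hci_dev *hdev, bdaddr_t *peer)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, peer, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}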
2254
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002255static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002256{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002257 if (status) {
2258 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002259
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002260 hci_dev_lock(hdev);
2261 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2262 hci_dev_unlock(hdev);
2263 return;
2264 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002265}
2266
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002267static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002268{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002269 /* General inquiry access code (GIAC) */
2270 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2271 struct hci_request req;
2272 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002273 int err;
2274
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002275 if (status) {
2276 BT_ERR("Failed to disable LE scanning: status %d", status);
2277 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002278 }
2279
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002280 switch (hdev->discovery.type) {
2281 case DISCOV_TYPE_LE:
2282 hci_dev_lock(hdev);
2283 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2284 hci_dev_unlock(hdev);
2285 break;
2286
2287 case DISCOV_TYPE_INTERLEAVED:
2288 hci_req_init(&req, hdev);
2289
2290 memset(&cp, 0, sizeof(cp));
2291 memcpy(&cp.lap, lap, sizeof(cp.lap));
2292 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2293 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2294
2295 hci_dev_lock(hdev);
2296
2297 hci_inquiry_cache_flush(hdev);
2298
2299 err = hci_req_run(&req, inquiry_complete);
2300 if (err) {
2301 BT_ERR("Inquiry request failed: err %d", err);
2302 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2303 }
2304
2305 hci_dev_unlock(hdev);
2306 break;
2307 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002308}
2309
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002310static void le_scan_disable_work(struct work_struct *work)
2311{
2312 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002313 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002314 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002315 struct hci_request req;
2316 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002317
2318 BT_DBG("%s", hdev->name);
2319
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002320 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002321
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002322 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002323 cp.enable = LE_SCAN_DISABLE;
2324 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002325
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002326 err = hci_req_run(&req, le_scan_disable_work_complete);
2327 if (err)
2328 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002329}
2330
David Herrmann9be0dab2012-04-22 14:39:57 +02002331/* Alloc HCI device */
2332struct hci_dev *hci_alloc_dev(void)
2333{
2334 struct hci_dev *hdev;
2335
2336 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2337 if (!hdev)
2338 return NULL;
2339
David Herrmannb1b813d2012-04-22 14:39:58 +02002340 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2341 hdev->esco_type = (ESCO_HV1);
2342 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002343	hdev->num_iac = 0x01;		/* Support for one IAC is mandatory */
2344 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002345 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2346 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002347
David Herrmannb1b813d2012-04-22 14:39:58 +02002348 hdev->sniff_max_interval = 800;
2349 hdev->sniff_min_interval = 80;
2350
Marcel Holtmannbef64732013-10-11 08:23:19 -07002351 hdev->le_scan_interval = 0x0060;
2352 hdev->le_scan_window = 0x0030;
2353
David Herrmannb1b813d2012-04-22 14:39:58 +02002354 mutex_init(&hdev->lock);
2355 mutex_init(&hdev->req_lock);
2356
2357 INIT_LIST_HEAD(&hdev->mgmt_pending);
2358 INIT_LIST_HEAD(&hdev->blacklist);
2359 INIT_LIST_HEAD(&hdev->uuids);
2360 INIT_LIST_HEAD(&hdev->link_keys);
2361 INIT_LIST_HEAD(&hdev->long_term_keys);
2362 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002363 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002364
2365 INIT_WORK(&hdev->rx_work, hci_rx_work);
2366 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2367 INIT_WORK(&hdev->tx_work, hci_tx_work);
2368 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002369
David Herrmannb1b813d2012-04-22 14:39:58 +02002370 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2371 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2372 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2373
David Herrmannb1b813d2012-04-22 14:39:58 +02002374 skb_queue_head_init(&hdev->rx_q);
2375 skb_queue_head_init(&hdev->cmd_q);
2376 skb_queue_head_init(&hdev->raw_q);
2377
2378 init_waitqueue_head(&hdev->req_wait_q);
2379
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002380 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002381
David Herrmannb1b813d2012-04-22 14:39:58 +02002382 hci_init_sysfs(hdev);
2383 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002384
2385 return hdev;
2386}
2387EXPORT_SYMBOL(hci_alloc_dev);
2388
2389/* Free HCI device */
2390void hci_free_dev(struct hci_dev *hdev)
2391{
David Herrmann9be0dab2012-04-22 14:39:57 +02002392 /* will free via device release */
2393 put_device(&hdev->dev);
2394}
2395EXPORT_SYMBOL(hci_free_dev);
2396
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397/* Register HCI device */
2398int hci_register_dev(struct hci_dev *hdev)
2399{
David Herrmannb1b813d2012-04-22 14:39:58 +02002400 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401
David Herrmann010666a2012-01-07 15:47:07 +01002402 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 return -EINVAL;
2404
Mat Martineau08add512011-11-02 16:18:36 -07002405 /* Do not allow HCI_AMP devices to register at index 0,
2406 * so the index can be used as the AMP controller ID.
2407 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002408 switch (hdev->dev_type) {
2409 case HCI_BREDR:
2410 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2411 break;
2412 case HCI_AMP:
2413 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2414 break;
2415 default:
2416 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002418
Sasha Levin3df92b32012-05-27 22:36:56 +02002419 if (id < 0)
2420 return id;
2421
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 sprintf(hdev->name, "hci%d", id);
2423 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002424
2425 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2426
Kees Cookd8537542013-07-03 15:04:57 -07002427 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2428 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002429 if (!hdev->workqueue) {
2430 error = -ENOMEM;
2431 goto err;
2432 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002433
Kees Cookd8537542013-07-03 15:04:57 -07002434 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2435 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002436 if (!hdev->req_workqueue) {
2437 destroy_workqueue(hdev->workqueue);
2438 error = -ENOMEM;
2439 goto err;
2440 }
2441
David Herrmann33ca9542011-10-08 14:58:49 +02002442 error = hci_add_sysfs(hdev);
2443 if (error < 0)
2444 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002446 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002447 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2448 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002449 if (hdev->rfkill) {
2450 if (rfkill_register(hdev->rfkill) < 0) {
2451 rfkill_destroy(hdev->rfkill);
2452 hdev->rfkill = NULL;
2453 }
2454 }
2455
Johan Hedberg5e130362013-09-13 08:58:17 +03002456 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2457 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2458
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002459 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002460 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002461
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002462 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002463 /* Assume BR/EDR support until proven otherwise (such as
2464	 * through reading supported features during init).
2465 */
2466 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2467 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002468
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002469 write_lock(&hci_dev_list_lock);
2470 list_add(&hdev->list, &hci_dev_list);
2471 write_unlock(&hci_dev_list_lock);
2472
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002474 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475
Johan Hedberg19202572013-01-14 22:33:51 +02002476 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002477
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002479
David Herrmann33ca9542011-10-08 14:58:49 +02002480err_wqueue:
2481 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002482 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002483err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002484 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002485
David Herrmann33ca9542011-10-08 14:58:49 +02002486 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487}
2488EXPORT_SYMBOL(hci_register_dev);
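/* Minimal sketch of a hypothetical transport driver built on the
 * interface above. The foo_* callbacks are stubs under stated
 * assumptions; a real driver in drivers/bluetooth/ also sets up and
 * tears down its transport here.
 */
static int foo_open(struct hci_dev *hdev)  { return 0; }
static int foo_close(struct hci_dev *hdev) { return 0; }
static int foo_flush(struct hci_dev *hdev) { return 0; }

static int foo_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here */
	kfree_skb(skb);
	return 0;
}

static int foo_register(void *transport_priv)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;	/* example bus type */
	hci_set_drvdata(hdev, transport_priv);

	hdev->open  = foo_open;
	hdev->close = foo_close;
	hdev->flush = foo_flush;
	hdev->send  = foo_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}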
2489
2490/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002491void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492{
Sasha Levin3df92b32012-05-27 22:36:56 +02002493 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002494
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002495 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496
Johan Hovold94324962012-03-15 14:48:41 +01002497 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2498
Sasha Levin3df92b32012-05-27 22:36:56 +02002499 id = hdev->id;
2500
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002501 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002503 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504
2505 hci_dev_do_close(hdev);
2506
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302507 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002508 kfree_skb(hdev->reassembly[i]);
2509
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002510 cancel_work_sync(&hdev->power_on);
2511
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002512 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002513 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002514 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002515 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002516 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002517 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002518
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002519 /* mgmt_index_removed should take care of emptying the
2520 * pending list */
2521 BUG_ON(!list_empty(&hdev->mgmt_pending));
2522
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523 hci_notify(hdev, HCI_DEV_UNREG);
2524
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002525 if (hdev->rfkill) {
2526 rfkill_unregister(hdev->rfkill);
2527 rfkill_destroy(hdev->rfkill);
2528 }
2529
David Herrmannce242972011-10-08 14:58:48 +02002530 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002531
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002532 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002533 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002534
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002535 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002536 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002537 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002538 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002539 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002540 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002541 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002542
David Herrmanndc946bd2012-01-07 15:47:24 +01002543 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002544
2545 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546}
2547EXPORT_SYMBOL(hci_unregister_dev);
2548
2549/* Suspend HCI device */
2550int hci_suspend_dev(struct hci_dev *hdev)
2551{
2552 hci_notify(hdev, HCI_DEV_SUSPEND);
2553 return 0;
2554}
2555EXPORT_SYMBOL(hci_suspend_dev);
2556
2557/* Resume HCI device */
2558int hci_resume_dev(struct hci_dev *hdev)
2559{
2560 hci_notify(hdev, HCI_DEV_RESUME);
2561 return 0;
2562}
2563EXPORT_SYMBOL(hci_resume_dev);
2564
Marcel Holtmann76bca882009-11-18 00:40:39 +01002565/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002566int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002567{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002568 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002569 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002570 kfree_skb(skb);
2571 return -ENXIO;
2572 }
2573
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002574 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002575 bt_cb(skb)->incoming = 1;
2576
2577 /* Time stamp */
2578 __net_timestamp(skb);
2579
Marcel Holtmann76bca882009-11-18 00:40:39 +01002580 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002581 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002582
Marcel Holtmann76bca882009-11-18 00:40:39 +01002583 return 0;
2584}
2585EXPORT_SYMBOL(hci_recv_frame);
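/* Hedged sketch of a driver RX path handing one complete event packet to
 * the core with hci_recv_frame(). The buffer layout (a full HCI event
 * without any transport packet-type byte) is an assumption.
 */
static int foo_rx_event(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}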
2586
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302587static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002588 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302589{
2590 int len = 0;
2591 int hlen = 0;
2592 int remain = count;
2593 struct sk_buff *skb;
2594 struct bt_skb_cb *scb;
2595
2596 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002597 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302598 return -EILSEQ;
2599
2600 skb = hdev->reassembly[index];
2601
2602 if (!skb) {
2603 switch (type) {
2604 case HCI_ACLDATA_PKT:
2605 len = HCI_MAX_FRAME_SIZE;
2606 hlen = HCI_ACL_HDR_SIZE;
2607 break;
2608 case HCI_EVENT_PKT:
2609 len = HCI_MAX_EVENT_SIZE;
2610 hlen = HCI_EVENT_HDR_SIZE;
2611 break;
2612 case HCI_SCODATA_PKT:
2613 len = HCI_MAX_SCO_SIZE;
2614 hlen = HCI_SCO_HDR_SIZE;
2615 break;
2616 }
2617
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002618 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302619 if (!skb)
2620 return -ENOMEM;
2621
2622 scb = (void *) skb->cb;
2623 scb->expect = hlen;
2624 scb->pkt_type = type;
2625
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302626 hdev->reassembly[index] = skb;
2627 }
2628
2629 while (count) {
2630 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002631 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302632
2633 memcpy(skb_put(skb, len), data, len);
2634
2635 count -= len;
2636 data += len;
2637 scb->expect -= len;
2638 remain = count;
2639
2640 switch (type) {
2641 case HCI_EVENT_PKT:
2642 if (skb->len == HCI_EVENT_HDR_SIZE) {
2643 struct hci_event_hdr *h = hci_event_hdr(skb);
2644 scb->expect = h->plen;
2645
2646 if (skb_tailroom(skb) < scb->expect) {
2647 kfree_skb(skb);
2648 hdev->reassembly[index] = NULL;
2649 return -ENOMEM;
2650 }
2651 }
2652 break;
2653
2654 case HCI_ACLDATA_PKT:
2655 if (skb->len == HCI_ACL_HDR_SIZE) {
2656 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2657 scb->expect = __le16_to_cpu(h->dlen);
2658
2659 if (skb_tailroom(skb) < scb->expect) {
2660 kfree_skb(skb);
2661 hdev->reassembly[index] = NULL;
2662 return -ENOMEM;
2663 }
2664 }
2665 break;
2666
2667 case HCI_SCODATA_PKT:
2668 if (skb->len == HCI_SCO_HDR_SIZE) {
2669 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2670 scb->expect = h->dlen;
2671
2672 if (skb_tailroom(skb) < scb->expect) {
2673 kfree_skb(skb);
2674 hdev->reassembly[index] = NULL;
2675 return -ENOMEM;
2676 }
2677 }
2678 break;
2679 }
2680
2681 if (scb->expect == 0) {
2682 /* Complete frame */
2683
2684 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002685 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302686
2687 hdev->reassembly[index] = NULL;
2688 return remain;
2689 }
2690 }
2691
2692 return remain;
2693}
2694
Marcel Holtmannef222012007-07-11 06:42:04 +02002695int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2696{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302697 int rem = 0;
2698
Marcel Holtmannef222012007-07-11 06:42:04 +02002699 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2700 return -EILSEQ;
2701
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002702 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002703 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302704 if (rem < 0)
2705 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002706
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302707 data += (count - rem);
2708 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002709 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002710
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302711 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002712}
2713EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data, count,
                                     STREAM_REASSEMBLY);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
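
/* Example: a UART-style transport, where the packet type indicator is
 * itself the first octet of each frame, could push raw bytes as below.
 * An illustrative sketch under #if 0; example_uart_rx() is hypothetical.
 */
#if 0
static void example_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
        /* hci_recv_stream_fragment() peels the leading type octet off
         * each frame and reassembles across arbitrarily sized chunks.
         */
        if (hci_recv_stream_fragment(hdev, buf, len) < 0)
                BT_ERR("%s corrupted stream", hdev->name);
}
#endif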

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
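
/* Example: an upper protocol (L2CAP and SCO are the in-tree users)
 * hooks into connection-level events by registering a struct hci_cb.
 * A sketch only, kept under #if 0; the example_* names are hypothetical.
 */
#if 0
static void example_security_cfm(struct hci_conn *conn, __u8 status,
                                 __u8 encrypt)
{
        BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x",
               conn, status, encrypt);
}

static struct hci_cb example_cb = {
        .name           = "example",
        .security_cfm   = example_security_cfm,
};

/* Call hci_register_cb(&example_cb) on module init and
 * hci_unregister_cb(&example_cb) on exit.
 */
#endif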

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        /* Time stamp */
        __net_timestamp(skb);

        /* Send copy to monitor */
        hci_send_to_monitor(hdev, skb);

        if (atomic_read(&hdev->promisc)) {
                /* Send copy to the sockets */
                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        if (hdev->send(hdev, skb) < 0)
                BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        bt_cb(skb)->req.complete = complete;

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
                                       u32 plen, const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

        return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
                 const void *param)
{
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        /* Stand-alone HCI commands must be flagged as
         * single-command requests.
         */
        bt_cb(skb)->req.start = true;

        skb_queue_tail(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}
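
/* Example: firing a stand-alone Write_Local_Name command. A sketch
 * only, kept under #if 0; example_set_name() is hypothetical, while
 * hci_send_cmd() and struct hci_cp_write_local_name come from the
 * core and hci.h respectively.
 */
#if 0
static int example_set_name(struct hci_dev *hdev, const char *name)
{
        struct hci_cp_write_local_name cp;

        memset(&cp, 0, sizeof(cp));
        memcpy(cp.name, name, min_t(size_t, strlen(name), sizeof(cp.name)));

        /* The command is queued on hdev->cmd_q and sent from
         * hci_cmd_work() as soon as the command credit allows.
         */
        return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
#endif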

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->req.start = true;

        bt_cb(skb)->req.event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}
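
/* Example: batching several commands into one asynchronous request,
 * in the style of the init and mgmt paths. A sketch only, kept under
 * #if 0; example_scan_complete() and the command choice are
 * illustrative, the request API is the one defined above.
 */
#if 0
static void example_scan_complete(struct hci_dev *hdev, u8 status)
{
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_enable_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 scan = SCAN_PAGE | SCAN_INQUIRY;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

        /* hci_req_run() splices all queued commands onto hdev->cmd_q
         * atomically; the callback fires once the last one completes
         * (or as soon as any of them fails).
         */
        return hci_req_run(&req, example_scan_complete);
}
#endif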

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
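
/* Example: a command-complete handler recovering the parameters of the
 * command it answers, as the handlers in hci_event.c do. A sketch only,
 * kept under #if 0.
 */
#if 0
static void example_cc_write_scan_enable(struct hci_dev *hdev,
                                         struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);
        void *sent;

        /* NULL here means the event does not match hdev->sent_cmd */
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
        if (!sent)
                return;

        if (!status)
                BT_DBG("%s scan enable set to 0x%2.2x", hdev->name,
                       *((__u8 *) sent));
}
#endif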

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
                          struct sk_buff *skb, __u16 flags)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        skb->len = skb_headlen(skb);
        skb->data_len = 0;

        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

        switch (hdev->dev_type) {
        case HCI_BREDR:
                hci_add_acl_hdr(skb, conn->handle, flags);
                break;
        case HCI_AMP:
                hci_add_acl_hdr(skb, chan->handle, flags);
                break;
        default:
                BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
                return;
        }

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(queue, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock(&queue->lock);

                __skb_queue_tail(queue, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;

                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(queue, skb);
                } while (list);

                spin_unlock(&queue->lock);
        }
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = chan->conn->hdev;

        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

        hci_queue_acl(chan, &chan->data_q, skb, flags);

        queue_work(hdev->workqueue, &hdev->tx_work);
}
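
/* Example: how an upper layer hands off an outgoing ACL frame, in the
 * style of l2cap_do_send(). A sketch only, kept under #if 0; the
 * hci_chan is assumed to have been set up elsewhere (hci_chan_create())
 * and the priority value is illustrative.
 */
#if 0
static void example_send_acl(struct hci_chan *chan, struct sk_buff *skb)
{
        /* ACL_START_NO_FLUSH marks the first fragment of a new,
         * non-flushable PDU; hci_tx_work() later drains chan->data_q
         * according to the controller's buffer credits.
         */
        skb->priority = HCI_PRIO_MAX - 1;
        hci_send_acl(chan, skb, ACL_START_NO_FLUSH);
}
#endif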

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
                                     int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL, *c;
        unsigned int num = 0, min = ~0;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min = c->sent;
                        conn = c;
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();

        if (conn) {
                int cnt, q;

                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c;

        BT_ERR("%s link tx timeout", hdev->name);

        rcu_read_lock();

        /* Kill stalled connections */
        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == type && c->sent) {
                        BT_ERR("%s killing stalled connection %pMR",
                               hdev->name, &c->dst);
                        hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
                }
        }

        rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL link over BR/EDR controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* No AMP link over AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR-based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags) ||
                    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}