/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

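/* Usage sketch (illustrative, not part of the original file): a caller that
 * needs a single HCI command sent synchronously can use the exported helper
 * above; on success, skb->data points at the Command Complete return
 * parameters. Error handling is kept minimal here.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse the return parameters at skb->data ...
 *
 *	kfree_skb(skb);
 */
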
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

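/* Usage sketch (illustrative only): hci_req_sync() takes a builder callback
 * that queues commands on the request; the batch is sent and the caller
 * sleeps until the last command completes. A hypothetical minimal builder,
 * mirroring the real hci_scan_req() further below:
 *
 *	static void my_scan_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_req_sync(hdev, my_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */
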
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

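/* For reference: the values returned above follow the HCI Write Inquiry
 * Mode command, i.e. 0x00 = standard inquiry results, 0x01 = results with
 * RSSI, 0x02 = extended inquiry results. The hardcoded manufacturer and
 * revision checks appear to be quirk entries for specific controllers
 * that handle RSSI inquiry results without setting the corresponding
 * feature bit.
 */
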
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

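/* For reference, the event mask layout used above follows the HCI
 * specification: the event with code E maps to bit (E - 1) of the 64-bit
 * mask, i.e. byte (E - 1) / 8, bit (E - 1) % 8. For example, Disconnection
 * Complete (event code 0x05) lands in events[0] as 1 << 4 == 0x10, which
 * matches the LE-only branch above.
 */
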
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

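/* The controller bring-up below is staged because each stage depends on
 * information returned by the previous one: stage 1 resets the controller
 * and reads its basic capabilities, stage 2 configures BR/EDR and LE
 * defaults based on the feature bits read in stage 1, stage 3 relies on
 * the supported-commands bitmap read in stage 2, and stage 4 uses the
 * extended feature pages read in stage 3.
 */
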
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The HCI_BREDR device type covers single-mode LE, BR/EDR-only
	 * and dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

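/* Usage sketch (illustrative only): every successful hci_dev_get() takes a
 * reference that must be dropped with hci_dev_put() once the caller is done
 * with the device, as hci_inquiry() below does:
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */
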
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

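/* A summary of the discovery state machine driven through
 * hci_discovery_set_state() above (not an exhaustive transition table):
 *
 *	STOPPED -> STARTING -> FINDING -> [RESOLVING] -> STOPPING -> STOPPED
 *
 * Userspace is notified via mgmt_discovering() only when discovery
 * effectively starts (entering FINDING) or effectively stops (entering
 * STOPPED from anything other than STARTING).
 */
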
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

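/* The resolve list is kept ordered so that name resolution proceeds from
 * the strongest signal outwards: RSSI is a negative dBm value, so a smaller
 * abs(rssi) means a closer device. Entries whose resolution is already in
 * flight (NAME_PENDING) are left at the head of the list.
 */
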
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
	 * and copy it to user space afterwards.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

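/* Usage sketch from the userspace side (illustrative only, modelled on
 * BlueZ's hci_inquiry() helper): as the copy_to_user() calls above show,
 * the hci_inquiry_req structure is immediately followed in memory by the
 * buffer that receives the returned inquiry_info entries.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { { 0 } };
 *	uint8_t lap[3] = { 0x33, 0x8b, 0x9e };	// General Inquiry Access Code
 *
 *	buf.ir.dev_id  = dev_id;
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	buf.ir.length  = 8;	// inquiry for 8 * 1.28 seconds
 *	buf.ir.num_rsp = 255;
 *	memcpy(buf.ir.lap, lap, 3);
 *
 *	if (ioctl(hci_sock, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */
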
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

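/* Each block emitted by create_ad() above is a standard advertising data
 * structure: one length octet (covering the type octet plus payload), one
 * AD type octet, then the payload. A flags block is therefore always
 * { 0x02, EIR_FLAGS, <flags> }, and the device name consumes name_len + 2
 * octets of the HCI_MAX_AD_LENGTH (31 octet) budget.
 */
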
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001332static int hci_dev_do_close(struct hci_dev *hdev)
1333{
1334 BT_DBG("%s %p", hdev->name, hdev);
1335
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001336 cancel_delayed_work(&hdev->power_off);
1337
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 hci_req_cancel(hdev, ENODEV);
1339 hci_req_lock(hdev);
1340
1341 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001342 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 hci_req_unlock(hdev);
1344 return 0;
1345 }
1346
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001347 /* Flush RX and TX works */
1348 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001349 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001351 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001352 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001353 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001354 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001355 }
1356
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001357 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001358 cancel_delayed_work(&hdev->service_cache);
1359
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001360 cancel_delayed_work_sync(&hdev->le_scan_disable);
1361
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001362 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001363 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001365 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
1367 hci_notify(hdev, HCI_DEV_DOWN);
1368
1369 if (hdev->flush)
1370 hdev->flush(hdev);
1371
1372 /* Reset device */
1373 skb_queue_purge(&hdev->cmd_q);
1374 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001375 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001376 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001378 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 clear_bit(HCI_INIT, &hdev->flags);
1380 }
1381
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001382	/* Flush cmd work */
1383 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384
1385 /* Drop queues */
1386 skb_queue_purge(&hdev->rx_q);
1387 skb_queue_purge(&hdev->cmd_q);
1388 skb_queue_purge(&hdev->raw_q);
1389
1390 /* Drop last sent command */
1391 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001392 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 kfree_skb(hdev->sent_cmd);
1394 hdev->sent_cmd = NULL;
1395 }
1396
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001397 kfree_skb(hdev->recv_evt);
1398 hdev->recv_evt = NULL;
1399
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 /* After this point our queues are empty
1401 * and no tasks are scheduled. */
1402 hdev->close(hdev);
1403
Johan Hedberg35b973c2013-03-15 17:06:59 -05001404 /* Clear flags */
1405 hdev->flags = 0;
1406 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1407
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001408 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1409 if (hdev->dev_type == HCI_BREDR) {
1410 hci_dev_lock(hdev);
1411 mgmt_powered(hdev, 0);
1412 hci_dev_unlock(hdev);
1413 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001414 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001415
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001416 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001417 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001418
Johan Hedberge59fda82012-02-22 18:11:53 +02001419 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001420 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001421
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 hci_req_unlock(hdev);
1423
1424 hci_dev_put(hdev);
1425 return 0;
1426}
1427
1428int hci_dev_close(__u16 dev)
1429{
1430 struct hci_dev *hdev;
1431 int err;
1432
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001433 hdev = hci_dev_get(dev);
1434 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001436
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001437 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1438 err = -EBUSY;
1439 goto done;
1440 }
1441
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001442 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1443 cancel_delayed_work(&hdev->power_off);
1444
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001446
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001447done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 hci_dev_put(hdev);
1449 return err;
1450}
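The close path is symmetric: HCIDEVDOWN lands in hci_dev_close(). A hypothetical one-line helper, reusing the control socket and includes from the HCIDEVUP sketch above:

/* Hypothetical sketch: power the adapter off; the kernel also
 * cancels any pending power_off work before closing. */
static int demo_dev_down(int ctl, int dev_id)
{
	return ioctl(ctl, HCIDEVDOWN, dev_id);
}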
1451
1452int hci_dev_reset(__u16 dev)
1453{
1454 struct hci_dev *hdev;
1455 int ret = 0;
1456
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001457 hdev = hci_dev_get(dev);
1458 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 return -ENODEV;
1460
1461 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462
Marcel Holtmann808a0492013-08-26 20:57:58 -07001463 if (!test_bit(HCI_UP, &hdev->flags)) {
1464 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001466 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001468 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1469 ret = -EBUSY;
1470 goto done;
1471 }
1472
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 /* Drop queues */
1474 skb_queue_purge(&hdev->rx_q);
1475 skb_queue_purge(&hdev->cmd_q);
1476
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001477 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001478 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001480 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
1482 if (hdev->flush)
1483 hdev->flush(hdev);
1484
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001485 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001486 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487
1488 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001489 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490
1491done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 hci_req_unlock(hdev);
1493 hci_dev_put(hdev);
1494 return ret;
1495}
1496
1497int hci_dev_reset_stat(__u16 dev)
1498{
1499 struct hci_dev *hdev;
1500 int ret = 0;
1501
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001502 hdev = hci_dev_get(dev);
1503 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 return -ENODEV;
1505
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001506 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1507 ret = -EBUSY;
1508 goto done;
1509 }
1510
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1512
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001513done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 return ret;
1516}
1517
1518int hci_dev_cmd(unsigned int cmd, void __user *arg)
1519{
1520 struct hci_dev *hdev;
1521 struct hci_dev_req dr;
1522 int err = 0;
1523
1524 if (copy_from_user(&dr, arg, sizeof(dr)))
1525 return -EFAULT;
1526
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001527 hdev = hci_dev_get(dr.dev_id);
1528 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 return -ENODEV;
1530
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001531 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1532 err = -EBUSY;
1533 goto done;
1534 }
1535
Johan Hedberg56f87902013-10-02 13:43:13 +03001536 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1537 err = -EOPNOTSUPP;
1538 goto done;
1539 }
1540
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 switch (cmd) {
1542 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001543 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1544 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 break;
1546
1547 case HCISETENCRYPT:
1548 if (!lmp_encrypt_capable(hdev)) {
1549 err = -EOPNOTSUPP;
1550 break;
1551 }
1552
1553 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1554 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001555 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1556 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 if (err)
1558 break;
1559 }
1560
Johan Hedberg01178cd2013-03-05 20:37:41 +02001561 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1562 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 break;
1564
1565 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001566 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1567 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 break;
1569
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001570 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001571 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1572 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001573 break;
1574
1575 case HCISETLINKMODE:
1576 hdev->link_mode = ((__u16) dr.dev_opt) &
1577 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1578 break;
1579
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 case HCISETPTYPE:
1581 hdev->pkt_type = (__u16) dr.dev_opt;
1582 break;
1583
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001585 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1586 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 break;
1588
1589 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001590 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1591 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 break;
1593
1594 default:
1595 err = -EINVAL;
1596 break;
1597 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001598
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001599done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 hci_dev_put(hdev);
1601 return err;
1602}
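The HCISETACLMTU and HCISETSCOMTU handlers read dev_opt as two 16-bit halves: the half at offset 0 becomes the packet count and the half at offset 1 the MTU, so on a little-endian host userspace packs the MTU into the high 16 bits. A hypothetical sketch (set_acl_mtu and its parameters are illustrative; assumes libbluetooth headers and a little-endian host, matching the pointer arithmetic above):

/* Hypothetical sketch: pack dev_opt the way hci_dev_cmd() above
 * unpacks it: low half packet count, high half MTU (LE host). */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int set_acl_mtu(int ctl, uint16_t dev_id, uint16_t mtu, uint16_t pkts)
{
	struct hci_dev_req dr;

	memset(&dr, 0, sizeof(dr));
	dr.dev_id = dev_id;
	dr.dev_opt = ((uint32_t) mtu << 16) | pkts;

	return ioctl(ctl, HCISETACLMTU, &dr);
}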
1603
1604int hci_get_dev_list(void __user *arg)
1605{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001606 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 struct hci_dev_list_req *dl;
1608 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 int n = 0, size, err;
1610 __u16 dev_num;
1611
1612 if (get_user(dev_num, (__u16 __user *) arg))
1613 return -EFAULT;
1614
1615 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1616 return -EINVAL;
1617
1618 size = sizeof(*dl) + dev_num * sizeof(*dr);
1619
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001620 dl = kzalloc(size, GFP_KERNEL);
1621 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 return -ENOMEM;
1623
1624 dr = dl->dev_req;
1625
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001626 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001627 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001628 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001629 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001630
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001631 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1632 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001633
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 (dr + n)->dev_id = hdev->id;
1635 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001636
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 if (++n >= dev_num)
1638 break;
1639 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001640 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641
1642 dl->dev_num = n;
1643 size = sizeof(*dl) + n * sizeof(*dr);
1644
1645 err = copy_to_user(arg, dl, size);
1646 kfree(dl);
1647
1648 return err ? -EFAULT : 0;
1649}
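hci_get_dev_list() copies back at most dev_num entries and rewrites dl->dev_num with the count actually filled in. A hypothetical userspace sketch of the HCIGETDEVLIST side (assumes libbluetooth headers; HCI_MAX_DEV bounds the request):

/* Hypothetical sketch: enumerate adapters and their flag words. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static void list_adapters(int ctl)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int i;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
	if (!dl)
		return;

	dl->dev_num = HCI_MAX_DEV;
	dr = dl->dev_req;

	if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u flags 0x%08x\n", dr[i].dev_id,
			       dr[i].dev_opt);

	free(dl);
}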
1650
1651int hci_get_dev_info(void __user *arg)
1652{
1653 struct hci_dev *hdev;
1654 struct hci_dev_info di;
1655 int err = 0;
1656
1657 if (copy_from_user(&di, arg, sizeof(di)))
1658 return -EFAULT;
1659
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001660 hdev = hci_dev_get(di.dev_id);
1661 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 return -ENODEV;
1663
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001664 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001665 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001666
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001667 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1668 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001669
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 strcpy(di.name, hdev->name);
1671 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001672 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 di.flags = hdev->flags;
1674 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001675 if (lmp_bredr_capable(hdev)) {
1676 di.acl_mtu = hdev->acl_mtu;
1677 di.acl_pkts = hdev->acl_pkts;
1678 di.sco_mtu = hdev->sco_mtu;
1679 di.sco_pkts = hdev->sco_pkts;
1680 } else {
1681 di.acl_mtu = hdev->le_mtu;
1682 di.acl_pkts = hdev->le_pkts;
1683 di.sco_mtu = 0;
1684 di.sco_pkts = 0;
1685 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 di.link_policy = hdev->link_policy;
1687 di.link_mode = hdev->link_mode;
1688
1689 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1690 memcpy(&di.features, &hdev->features, sizeof(di.features));
1691
1692 if (copy_to_user(arg, &di, sizeof(di)))
1693 err = -EFAULT;
1694
1695 hci_dev_put(hdev);
1696
1697 return err;
1698}
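The per-device counterpart is HCIGETDEVINFO; note how the lmp_bredr_capable() branch above makes LE-only controllers report their LE buffer settings through the acl_mtu/acl_pkts fields. A hypothetical sketch (same headers as the list sketch plus stdint.h; bdaddr bytes are stored little-endian, hence the reversed print order):

/* Hypothetical sketch: query one adapter and print its address. */
static void show_adapter(int ctl, uint16_t dev_id)
{
	struct hci_dev_info di = { .dev_id = dev_id };

	if (ioctl(ctl, HCIGETDEVINFO, &di) < 0)
		return;

	printf("%s %02X:%02X:%02X:%02X:%02X:%02X acl_mtu %u:%u\n",
	       di.name,
	       di.bdaddr.b[5], di.bdaddr.b[4], di.bdaddr.b[3],
	       di.bdaddr.b[2], di.bdaddr.b[1], di.bdaddr.b[0],
	       di.acl_mtu, di.acl_pkts);
}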
1699
1700/* ---- Interface to HCI drivers ---- */
1701
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001702static int hci_rfkill_set_block(void *data, bool blocked)
1703{
1704 struct hci_dev *hdev = data;
1705
1706 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1707
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001708 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1709 return -EBUSY;
1710
Johan Hedberg5e130362013-09-13 08:58:17 +03001711 if (blocked) {
1712 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001713 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1714 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001715 } else {
1716 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001717 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001718
1719 return 0;
1720}
1721
1722static const struct rfkill_ops hci_rfkill_ops = {
1723 .set_block = hci_rfkill_set_block,
1724};
1725
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001726static void hci_power_on(struct work_struct *work)
1727{
1728 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001729 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001730
1731 BT_DBG("%s", hdev->name);
1732
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001733 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001734 if (err < 0) {
1735 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001736 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001737 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001738
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001739 /* During the HCI setup phase, a few error conditions are
1740 * ignored and they need to be checked now. If they are still
1741 * valid, it is important to turn the device back off.
1742 */
1743 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1744 (hdev->dev_type == HCI_BREDR &&
1745 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1746 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03001747 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1748 hci_dev_do_close(hdev);
1749 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02001750 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1751 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03001752 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001753
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001754 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001755 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001756}
1757
1758static void hci_power_off(struct work_struct *work)
1759{
Johan Hedberg32435532011-11-07 22:16:04 +02001760 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001761 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001762
1763 BT_DBG("%s", hdev->name);
1764
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001765 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001766}
1767
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001768static void hci_discov_off(struct work_struct *work)
1769{
1770 struct hci_dev *hdev;
1771 u8 scan = SCAN_PAGE;
1772
1773 hdev = container_of(work, struct hci_dev, discov_off.work);
1774
1775 BT_DBG("%s", hdev->name);
1776
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001777 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001778
1779 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1780
1781 hdev->discov_timeout = 0;
1782
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001783 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001784}
1785
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001786int hci_uuids_clear(struct hci_dev *hdev)
1787{
Johan Hedberg48210022013-01-27 00:31:28 +02001788 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001789
Johan Hedberg48210022013-01-27 00:31:28 +02001790 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1791 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001792 kfree(uuid);
1793 }
1794
1795 return 0;
1796}
1797
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001798int hci_link_keys_clear(struct hci_dev *hdev)
1799{
1800 struct list_head *p, *n;
1801
1802 list_for_each_safe(p, n, &hdev->link_keys) {
1803 struct link_key *key;
1804
1805 key = list_entry(p, struct link_key, list);
1806
1807 list_del(p);
1808 kfree(key);
1809 }
1810
1811 return 0;
1812}
1813
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001814int hci_smp_ltks_clear(struct hci_dev *hdev)
1815{
1816 struct smp_ltk *k, *tmp;
1817
1818 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1819 list_del(&k->list);
1820 kfree(k);
1821 }
1822
1823 return 0;
1824}
1825
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001826struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1827{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001828 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001829
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001830 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001831 if (bacmp(bdaddr, &k->bdaddr) == 0)
1832 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001833
1834 return NULL;
1835}
1836
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301837static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001838 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001839{
1840 /* Legacy key */
1841 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301842 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001843
1844 /* Debug keys are insecure so don't store them persistently */
1845 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301846 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001847
1848 /* Changed combination key and there's no previous one */
1849 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301850 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001851
1852 /* Security mode 3 case */
1853 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301854 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001855
1856	/* Neither local nor remote side had no-bonding as a requirement */
1857 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301858 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001859
1860 /* Local side had dedicated bonding as requirement */
1861 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301862 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001863
1864 /* Remote side had dedicated bonding as requirement */
1865 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301866 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001867
1868 /* If none of the above criteria match, then don't store the key
1869 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301870 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001871}
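hci_persistent_key() is the one place that decides whether a new link key outlives the connection, so a standalone restatement makes the rules easy to exercise. The function below is a hypothetical userspace mirror of the logic above, not kernel code; the key-type and auth-requirement constants are the raw values used in this file.

/* Hypothetical mirror of hci_persistent_key() for experimentation;
 * have_conn stands in for the conn pointer (security mode 3). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool key_is_persistent(uint8_t key_type, uint8_t old_key_type,
			      bool have_conn, uint8_t local_auth,
			      uint8_t remote_auth)
{
	if (key_type < 0x03)			/* legacy key */
		return true;
	if (key_type == 0x03)			/* debug combination */
		return false;
	if (key_type == 0x06 && old_key_type == 0xff)
		return false;			/* changed, no previous */
	if (!have_conn)				/* security mode 3 */
		return true;
	if (local_auth > 0x01 && remote_auth > 0x01)
		return true;			/* both wanted bonding */
	if (local_auth == 0x02 || local_auth == 0x03)
		return true;			/* local dedicated bonding */
	if (remote_auth == 0x02 || remote_auth == 0x03)
		return true;			/* remote dedicated bonding */
	return false;
}

int main(void)
{
	assert(key_is_persistent(0x00, 0xff, true, 0x00, 0x00));
	assert(!key_is_persistent(0x03, 0xff, true, 0x03, 0x03));
	return 0;
}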
1872
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001873struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001874{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001875 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001876
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001877 list_for_each_entry(k, &hdev->long_term_keys, list) {
1878 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001879 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001880 continue;
1881
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001882 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001883 }
1884
1885 return NULL;
1886}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001887
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001888struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001889 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001890{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001891 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001892
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001893 list_for_each_entry(k, &hdev->long_term_keys, list)
1894 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001895 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001896 return k;
1897
1898 return NULL;
1899}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001900
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001901int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001902 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001903{
1904 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301905 u8 old_key_type;
1906 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001907
1908 old_key = hci_find_link_key(hdev, bdaddr);
1909 if (old_key) {
1910 old_key_type = old_key->type;
1911 key = old_key;
1912 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001913 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001914 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1915 if (!key)
1916 return -ENOMEM;
1917 list_add(&key->list, &hdev->link_keys);
1918 }
1919
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001920 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001921
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001922 /* Some buggy controller combinations generate a changed
1923 * combination key for legacy pairing even when there's no
1924 * previous key */
1925 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001926 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001927 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001928 if (conn)
1929 conn->key_type = type;
1930 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001931
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001932 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001933 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001934 key->pin_len = pin_len;
1935
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001936 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001937 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001938 else
1939 key->type = type;
1940
Johan Hedberg4df378a2011-04-28 11:29:03 -07001941 if (!new_key)
1942 return 0;
1943
1944 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1945
Johan Hedberg744cf192011-11-08 20:40:14 +02001946 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001947
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301948 if (conn)
1949 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001950
1951 return 0;
1952}
1953
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001954int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001955 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001956 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001957{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001958 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001959
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001960 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1961 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001962
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001963 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1964 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001965 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001966 else {
1967 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001968 if (!key)
1969 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001970 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001971 }
1972
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001973 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001974 key->bdaddr_type = addr_type;
1975 memcpy(key->val, tk, sizeof(key->val));
1976 key->authenticated = authenticated;
1977 key->ediv = ediv;
1978 key->enc_size = enc_size;
1979 key->type = type;
1980 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001981
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001982 if (!new_key)
1983 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001984
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001985 if (type & HCI_SMP_LTK)
1986 mgmt_new_ltk(hdev, key, 1);
1987
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001988 return 0;
1989}
1990
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001991int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1992{
1993 struct link_key *key;
1994
1995 key = hci_find_link_key(hdev, bdaddr);
1996 if (!key)
1997 return -ENOENT;
1998
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001999 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002000
2001 list_del(&key->list);
2002 kfree(key);
2003
2004 return 0;
2005}
2006
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002007int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2008{
2009 struct smp_ltk *k, *tmp;
2010
2011 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2012 if (bacmp(bdaddr, &k->bdaddr))
2013 continue;
2014
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002015 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002016
2017 list_del(&k->list);
2018 kfree(k);
2019 }
2020
2021 return 0;
2022}
2023
Ville Tervo6bd32322011-02-16 16:32:41 +02002024/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002025static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002026{
2027 struct hci_dev *hdev = (void *) arg;
2028
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002029 if (hdev->sent_cmd) {
2030 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2031 u16 opcode = __le16_to_cpu(sent->opcode);
2032
2033 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2034 } else {
2035 BT_ERR("%s command tx timeout", hdev->name);
2036 }
2037
Ville Tervo6bd32322011-02-16 16:32:41 +02002038 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002039 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002040}
2041
Szymon Janc2763eda2011-03-22 13:12:22 +01002042struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002043 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002044{
2045 struct oob_data *data;
2046
2047 list_for_each_entry(data, &hdev->remote_oob_data, list)
2048 if (bacmp(bdaddr, &data->bdaddr) == 0)
2049 return data;
2050
2051 return NULL;
2052}
2053
2054int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2055{
2056 struct oob_data *data;
2057
2058 data = hci_find_remote_oob_data(hdev, bdaddr);
2059 if (!data)
2060 return -ENOENT;
2061
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002062 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002063
2064 list_del(&data->list);
2065 kfree(data);
2066
2067 return 0;
2068}
2069
2070int hci_remote_oob_data_clear(struct hci_dev *hdev)
2071{
2072 struct oob_data *data, *n;
2073
2074 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2075 list_del(&data->list);
2076 kfree(data);
2077 }
2078
2079 return 0;
2080}
2081
2082int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002083 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002084{
2085 struct oob_data *data;
2086
2087 data = hci_find_remote_oob_data(hdev, bdaddr);
2088
2089 if (!data) {
2090 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2091 if (!data)
2092 return -ENOMEM;
2093
2094 bacpy(&data->bdaddr, bdaddr);
2095 list_add(&data->list, &hdev->remote_oob_data);
2096 }
2097
2098 memcpy(data->hash, hash, sizeof(data->hash));
2099 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2100
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002101 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002102
2103 return 0;
2104}
2105
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002106struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002107{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002108 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002109
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002110 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002111 if (bacmp(bdaddr, &b->bdaddr) == 0)
2112 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002113
2114 return NULL;
2115}
2116
2117int hci_blacklist_clear(struct hci_dev *hdev)
2118{
2119 struct list_head *p, *n;
2120
2121 list_for_each_safe(p, n, &hdev->blacklist) {
2122 struct bdaddr_list *b;
2123
2124 b = list_entry(p, struct bdaddr_list, list);
2125
2126 list_del(p);
2127 kfree(b);
2128 }
2129
2130 return 0;
2131}
2132
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002133int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002134{
2135 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002136
2137 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2138 return -EBADF;
2139
Antti Julku5e762442011-08-25 16:48:02 +03002140 if (hci_blacklist_lookup(hdev, bdaddr))
2141 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002142
2143 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002144 if (!entry)
2145 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002146
2147 bacpy(&entry->bdaddr, bdaddr);
2148
2149 list_add(&entry->list, &hdev->blacklist);
2150
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002151 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002152}
2153
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002154int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002155{
2156 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002157
Szymon Janc1ec918c2011-11-16 09:32:21 +01002158 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002159 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002160
2161 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002162 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002163 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002164
2165 list_del(&entry->list);
2166 kfree(entry);
2167
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002168 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002169}
2170
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002171static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002172{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002173 if (status) {
2174 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002175
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002176 hci_dev_lock(hdev);
2177 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2178 hci_dev_unlock(hdev);
2179 return;
2180 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002181}
2182
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002183static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002184{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002185 /* General inquiry access code (GIAC) */
2186 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2187 struct hci_request req;
2188 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002189 int err;
2190
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002191 if (status) {
2192 BT_ERR("Failed to disable LE scanning: status %d", status);
2193 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002194 }
2195
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002196 switch (hdev->discovery.type) {
2197 case DISCOV_TYPE_LE:
2198 hci_dev_lock(hdev);
2199 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2200 hci_dev_unlock(hdev);
2201 break;
2202
2203 case DISCOV_TYPE_INTERLEAVED:
2204 hci_req_init(&req, hdev);
2205
2206 memset(&cp, 0, sizeof(cp));
2207 memcpy(&cp.lap, lap, sizeof(cp.lap));
2208 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2209 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2210
2211 hci_dev_lock(hdev);
2212
2213 hci_inquiry_cache_flush(hdev);
2214
2215 err = hci_req_run(&req, inquiry_complete);
2216 if (err) {
2217 BT_ERR("Inquiry request failed: err %d", err);
2218 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2219 }
2220
2221 hci_dev_unlock(hdev);
2222 break;
2223 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002224}
2225
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002226static void le_scan_disable_work(struct work_struct *work)
2227{
2228 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002229 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002230 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002231 struct hci_request req;
2232 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002233
2234 BT_DBG("%s", hdev->name);
2235
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002236 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002237
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002238 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002239 cp.enable = LE_SCAN_DISABLE;
2240 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002241
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002242 err = hci_req_run(&req, le_scan_disable_work_complete);
2243 if (err)
2244 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002245}
2246
David Herrmann9be0dab2012-04-22 14:39:57 +02002247/* Alloc HCI device */
2248struct hci_dev *hci_alloc_dev(void)
2249{
2250 struct hci_dev *hdev;
2251
2252 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2253 if (!hdev)
2254 return NULL;
2255
David Herrmannb1b813d2012-04-22 14:39:58 +02002256 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2257 hdev->esco_type = (ESCO_HV1);
2258 hdev->link_mode = (HCI_LM_ACCEPT);
2259 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002260 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2261 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002262
David Herrmannb1b813d2012-04-22 14:39:58 +02002263 hdev->sniff_max_interval = 800;
2264 hdev->sniff_min_interval = 80;
2265
2266 mutex_init(&hdev->lock);
2267 mutex_init(&hdev->req_lock);
2268
2269 INIT_LIST_HEAD(&hdev->mgmt_pending);
2270 INIT_LIST_HEAD(&hdev->blacklist);
2271 INIT_LIST_HEAD(&hdev->uuids);
2272 INIT_LIST_HEAD(&hdev->link_keys);
2273 INIT_LIST_HEAD(&hdev->long_term_keys);
2274 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002275 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002276
2277 INIT_WORK(&hdev->rx_work, hci_rx_work);
2278 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2279 INIT_WORK(&hdev->tx_work, hci_tx_work);
2280 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002281
David Herrmannb1b813d2012-04-22 14:39:58 +02002282 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2283 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2284 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2285
David Herrmannb1b813d2012-04-22 14:39:58 +02002286 skb_queue_head_init(&hdev->rx_q);
2287 skb_queue_head_init(&hdev->cmd_q);
2288 skb_queue_head_init(&hdev->raw_q);
2289
2290 init_waitqueue_head(&hdev->req_wait_q);
2291
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002292 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002293
David Herrmannb1b813d2012-04-22 14:39:58 +02002294 hci_init_sysfs(hdev);
2295 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002296
2297 return hdev;
2298}
2299EXPORT_SYMBOL(hci_alloc_dev);
2300
2301/* Free HCI device */
2302void hci_free_dev(struct hci_dev *hdev)
2303{
David Herrmann9be0dab2012-04-22 14:39:57 +02002304 /* will free via device release */
2305 put_device(&hdev->dev);
2306}
2307EXPORT_SYMBOL(hci_free_dev);
2308
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309/* Register HCI device */
2310int hci_register_dev(struct hci_dev *hdev)
2311{
David Herrmannb1b813d2012-04-22 14:39:58 +02002312 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
David Herrmann010666a2012-01-07 15:47:07 +01002314 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 return -EINVAL;
2316
Mat Martineau08add512011-11-02 16:18:36 -07002317 /* Do not allow HCI_AMP devices to register at index 0,
2318 * so the index can be used as the AMP controller ID.
2319 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002320 switch (hdev->dev_type) {
2321 case HCI_BREDR:
2322 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2323 break;
2324 case HCI_AMP:
2325 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2326 break;
2327 default:
2328 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002330
Sasha Levin3df92b32012-05-27 22:36:56 +02002331 if (id < 0)
2332 return id;
2333
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 sprintf(hdev->name, "hci%d", id);
2335 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002336
2337 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2338
Kees Cookd8537542013-07-03 15:04:57 -07002339 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2340 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002341 if (!hdev->workqueue) {
2342 error = -ENOMEM;
2343 goto err;
2344 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002345
Kees Cookd8537542013-07-03 15:04:57 -07002346 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2347 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002348 if (!hdev->req_workqueue) {
2349 destroy_workqueue(hdev->workqueue);
2350 error = -ENOMEM;
2351 goto err;
2352 }
2353
David Herrmann33ca9542011-10-08 14:58:49 +02002354 error = hci_add_sysfs(hdev);
2355 if (error < 0)
2356 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002358 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002359 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2360 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002361 if (hdev->rfkill) {
2362 if (rfkill_register(hdev->rfkill) < 0) {
2363 rfkill_destroy(hdev->rfkill);
2364 hdev->rfkill = NULL;
2365 }
2366 }
2367
Johan Hedberg5e130362013-09-13 08:58:17 +03002368 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2369 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2370
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002371 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002372 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002373
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002374 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002375 /* Assume BR/EDR support until proven otherwise (such as
2376 * through reading supported features during init.
2377 */
2378 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2379 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002380
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002381 write_lock(&hci_dev_list_lock);
2382 list_add(&hdev->list, &hci_dev_list);
2383 write_unlock(&hci_dev_list_lock);
2384
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002386 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387
Johan Hedberg19202572013-01-14 22:33:51 +02002388 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002389
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002391
David Herrmann33ca9542011-10-08 14:58:49 +02002392err_wqueue:
2393 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002394 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002395err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002396 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002397
David Herrmann33ca9542011-10-08 14:58:49 +02002398 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399}
2400EXPORT_SYMBOL(hci_register_dev);
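A transport driver only has to populate the callbacks that hci_register_dev() checks for. Below is a hypothetical in-tree skeleton (the demo_* names are illustrative; in this kernel version send() takes just the skb, with the owning hdev stashed in skb->dev):

/* Hypothetical minimal driver sketch, not a real transport. */
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int demo_open(struct hci_dev *hdev)  { return 0; }
static int demo_close(struct hci_dev *hdev) { return 0; }

static int demo_send(struct sk_buff *skb)
{
	/* A real driver would push the frame to its hardware here. */
	kfree_skb(skb);
	return 0;
}

static struct hci_dev *demo_register(void)
{
	struct hci_dev *hdev = hci_alloc_dev();

	if (!hdev)
		return NULL;

	hdev->bus   = HCI_USB;	/* or whatever transport applies */
	hdev->open  = demo_open;
	hdev->close = demo_close;
	hdev->send  = demo_send;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return NULL;
	}

	return hdev;
}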
2401
2402/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002403void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404{
Sasha Levin3df92b32012-05-27 22:36:56 +02002405 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002406
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002407 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408
Johan Hovold94324962012-03-15 14:48:41 +01002409 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2410
Sasha Levin3df92b32012-05-27 22:36:56 +02002411 id = hdev->id;
2412
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002413 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002415 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416
2417 hci_dev_do_close(hdev);
2418
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302419 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002420 kfree_skb(hdev->reassembly[i]);
2421
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002422 cancel_work_sync(&hdev->power_on);
2423
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002424 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002425 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002426 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002427 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002428 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002429 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002430
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002431 /* mgmt_index_removed should take care of emptying the
2432 * pending list */
2433 BUG_ON(!list_empty(&hdev->mgmt_pending));
2434
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 hci_notify(hdev, HCI_DEV_UNREG);
2436
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002437 if (hdev->rfkill) {
2438 rfkill_unregister(hdev->rfkill);
2439 rfkill_destroy(hdev->rfkill);
2440 }
2441
David Herrmannce242972011-10-08 14:58:48 +02002442 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002443
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002444 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002445 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002446
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002447 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002448 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002449 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002450 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002451 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002452 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002453 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002454
David Herrmanndc946bd2012-01-07 15:47:24 +01002455 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002456
2457 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458}
2459EXPORT_SYMBOL(hci_unregister_dev);
2460
2461/* Suspend HCI device */
2462int hci_suspend_dev(struct hci_dev *hdev)
2463{
2464 hci_notify(hdev, HCI_DEV_SUSPEND);
2465 return 0;
2466}
2467EXPORT_SYMBOL(hci_suspend_dev);
2468
2469/* Resume HCI device */
2470int hci_resume_dev(struct hci_dev *hdev)
2471{
2472 hci_notify(hdev, HCI_DEV_RESUME);
2473 return 0;
2474}
2475EXPORT_SYMBOL(hci_resume_dev);
2476
Marcel Holtmann76bca882009-11-18 00:40:39 +01002477/* Receive frame from HCI drivers */
2478int hci_recv_frame(struct sk_buff *skb)
2479{
2480 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2481 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002482 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002483 kfree_skb(skb);
2484 return -ENXIO;
2485 }
2486
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002487 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002488 bt_cb(skb)->incoming = 1;
2489
2490 /* Time stamp */
2491 __net_timestamp(skb);
2492
Marcel Holtmann76bca882009-11-18 00:40:39 +01002493 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002494 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002495
Marcel Holtmann76bca882009-11-18 00:40:39 +01002496 return 0;
2497}
2498EXPORT_SYMBOL(hci_recv_frame);
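Drivers hand complete frames back with hci_recv_frame(); the skb must carry its packet type and owning hdev, and is dropped with -ENXIO when the device is neither up nor initializing. A hypothetical RX-path sketch (demo_deliver_event is illustrative):

/* Hypothetical sketch: wrap a finished HCI event in an skb and
 * queue it toward hci_rx_work via hci_recv_frame() above. */
static int demo_deliver_event(struct hci_dev *hdev, const u8 *buf, int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;

	return hci_recv_frame(skb);
}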
2499
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302500static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002501 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302502{
2503 int len = 0;
2504 int hlen = 0;
2505 int remain = count;
2506 struct sk_buff *skb;
2507 struct bt_skb_cb *scb;
2508
2509 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002510 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302511 return -EILSEQ;
2512
2513 skb = hdev->reassembly[index];
2514
2515 if (!skb) {
2516 switch (type) {
2517 case HCI_ACLDATA_PKT:
2518 len = HCI_MAX_FRAME_SIZE;
2519 hlen = HCI_ACL_HDR_SIZE;
2520 break;
2521 case HCI_EVENT_PKT:
2522 len = HCI_MAX_EVENT_SIZE;
2523 hlen = HCI_EVENT_HDR_SIZE;
2524 break;
2525 case HCI_SCODATA_PKT:
2526 len = HCI_MAX_SCO_SIZE;
2527 hlen = HCI_SCO_HDR_SIZE;
2528 break;
2529 }
2530
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002531 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302532 if (!skb)
2533 return -ENOMEM;
2534
2535 scb = (void *) skb->cb;
2536 scb->expect = hlen;
2537 scb->pkt_type = type;
2538
2539 skb->dev = (void *) hdev;
2540 hdev->reassembly[index] = skb;
2541 }
2542
2543 while (count) {
2544 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002545 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302546
2547 memcpy(skb_put(skb, len), data, len);
2548
2549 count -= len;
2550 data += len;
2551 scb->expect -= len;
2552 remain = count;
2553
2554 switch (type) {
2555 case HCI_EVENT_PKT:
2556 if (skb->len == HCI_EVENT_HDR_SIZE) {
2557 struct hci_event_hdr *h = hci_event_hdr(skb);
2558 scb->expect = h->plen;
2559
2560 if (skb_tailroom(skb) < scb->expect) {
2561 kfree_skb(skb);
2562 hdev->reassembly[index] = NULL;
2563 return -ENOMEM;
2564 }
2565 }
2566 break;
2567
2568 case HCI_ACLDATA_PKT:
2569 if (skb->len == HCI_ACL_HDR_SIZE) {
2570 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2571 scb->expect = __le16_to_cpu(h->dlen);
2572
2573 if (skb_tailroom(skb) < scb->expect) {
2574 kfree_skb(skb);
2575 hdev->reassembly[index] = NULL;
2576 return -ENOMEM;
2577 }
2578 }
2579 break;
2580
2581 case HCI_SCODATA_PKT:
2582 if (skb->len == HCI_SCO_HDR_SIZE) {
2583 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2584 scb->expect = h->dlen;
2585
2586 if (skb_tailroom(skb) < scb->expect) {
2587 kfree_skb(skb);
2588 hdev->reassembly[index] = NULL;
2589 return -ENOMEM;
2590 }
2591 }
2592 break;
2593 }
2594
2595 if (scb->expect == 0) {
2596 /* Complete frame */
2597
2598 bt_cb(skb)->pkt_type = type;
2599 hci_recv_frame(skb);
2600
2601 hdev->reassembly[index] = NULL;
2602 return remain;
2603 }
2604 }
2605
2606 return remain;
2607}
2608
Marcel Holtmannef222012007-07-11 06:42:04 +02002609int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2610{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302611 int rem = 0;
2612
Marcel Holtmannef222012007-07-11 06:42:04 +02002613 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2614 return -EILSEQ;
2615
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002616 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002617 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302618 if (rem < 0)
2619 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002620
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302621 data += (count - rem);
2622 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002623 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002624
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302625 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002626}
2627EXPORT_SYMBOL(hci_recv_fragment);
2628
Suraj Sumangala99811512010-07-14 13:02:19 +05302629#define STREAM_REASSEMBLY 0
2630
2631int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2632{
2633 int type;
2634 int rem = 0;
2635
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002636 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302637 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2638
2639 if (!skb) {
2640 struct { char type; } *pkt;
2641
2642 /* Start of the frame */
2643 pkt = data;
2644 type = pkt->type;
2645
2646 data++;
2647 count--;
2648 } else
2649 type = bt_cb(skb)->pkt_type;
2650
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002651 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002652 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302653 if (rem < 0)
2654 return rem;
2655
2656 data += (count - rem);
2657 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002658 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302659
2660 return rem;
2661}
2662EXPORT_SYMBOL(hci_recv_stream_fragment);
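UART-style drivers that see an unframed byte stream can push raw buffers into hci_recv_stream_fragment(), which peels the packet-type byte off each frame itself. A hypothetical sketch (demo_uart_rx is illustrative):

/* Hypothetical sketch: feed raw transport bytes to the stream
 * reassembler; partial packets are held in hdev->reassembly. */
static void demo_uart_rx(struct hci_dev *hdev, const u8 *buf, int count)
{
	int err = hci_recv_stream_fragment(hdev, (void *) buf, count);

	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}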
2663
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664/* ---- Interface to upper protocols ---- */
2665
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666int hci_register_cb(struct hci_cb *cb)
2667{
2668 BT_DBG("%p name %s", cb, cb->name);
2669
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002670 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002672 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
2674 return 0;
2675}
2676EXPORT_SYMBOL(hci_register_cb);
2677
2678int hci_unregister_cb(struct hci_cb *cb)
2679{
2680 BT_DBG("%p name %s", cb, cb->name);
2681
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002682 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002684 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685
2686 return 0;
2687}
2688EXPORT_SYMBOL(hci_unregister_cb);
2689
2690static int hci_send_frame(struct sk_buff *skb)
2691{
2692 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2693
2694 if (!hdev) {
2695 kfree_skb(skb);
2696 return -ENODEV;
2697 }
2698
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002699 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002701 /* Time stamp */
2702 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002704 /* Send copy to monitor */
2705 hci_send_to_monitor(hdev, skb);
2706
2707 if (atomic_read(&hdev->promisc)) {
2708 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002709 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 }
2711
2712 /* Get rid of skb owner, prior to sending to the driver. */
2713 skb_orphan(skb);
2714
2715 return hdev->send(skb);
2716}
2717
Johan Hedberg3119ae92013-03-05 20:37:44 +02002718void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2719{
2720 skb_queue_head_init(&req->cmd_q);
2721 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002722 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002723}
2724
2725int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2726{
2727 struct hci_dev *hdev = req->hdev;
2728 struct sk_buff *skb;
2729 unsigned long flags;
2730
2731 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2732
Andre Guedes5d73e032013-03-08 11:20:16 -03002733	/* If an error occurred during request building, remove all HCI
2734 * commands queued on the HCI request queue.
2735 */
2736 if (req->err) {
2737 skb_queue_purge(&req->cmd_q);
2738 return req->err;
2739 }
2740
Johan Hedberg3119ae92013-03-05 20:37:44 +02002741 /* Do not allow empty requests */
2742 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002743 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002744
2745 skb = skb_peek_tail(&req->cmd_q);
2746 bt_cb(skb)->req.complete = complete;
2747
2748 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2749 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2750 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2751
2752 queue_work(hdev->workqueue, &hdev->cmd_work);
2753
2754 return 0;
2755}
2756
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002757static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002758 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759{
2760 int len = HCI_COMMAND_HDR_SIZE + plen;
2761 struct hci_command_hdr *hdr;
2762 struct sk_buff *skb;
2763
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002765 if (!skb)
2766 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767
2768 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002769 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 hdr->plen = plen;
2771
2772 if (plen)
2773 memcpy(skb_put(skb, plen), param, plen);
2774
2775 BT_DBG("skb len %d", skb->len);
2776
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002777 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002779
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002780 return skb;
2781}
2782
2783/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002784int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2785 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002786{
2787 struct sk_buff *skb;
2788
2789 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2790
2791 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2792 if (!skb) {
2793 BT_ERR("%s no memory for command", hdev->name);
2794 return -ENOMEM;
2795 }
2796
Johan Hedberg11714b32013-03-05 20:37:47 +02002797	/* Stand-alone HCI commands must be flagged as
2798 * single-command requests.
2799 */
2800 bt_cb(skb)->req.start = true;
2801
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002803 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
2805 return 0;
2806}
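
/* Illustrative sketch, not part of the original file: sending one
 * stand-alone command. Write Local Name is an assumed example opcode;
 * hci_prepare_cmd() copies the parameter buffer into the skb, so a
 * stack variable is sufficient.
 */
static int example_set_local_name(struct hci_dev *hdev, const char *name)
{
	struct hci_cp_write_local_name cp;

	memset(&cp, 0, sizeof(cp));
	memcpy(cp.name, name, min_t(size_t, strlen(name), sizeof(cp.name)));

	return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}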
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807
Johan Hedberg71c76a12013-03-05 20:37:46 +02002808/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002809void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2810 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002811{
2812 struct hci_dev *hdev = req->hdev;
2813 struct sk_buff *skb;
2814
2815 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2816
Andre Guedes34739c12013-03-08 11:20:18 -03002817	/* If an error occurred during request building, there is no point in
2818 * queueing the HCI command. We can simply return.
2819 */
2820 if (req->err)
2821 return;
2822
Johan Hedberg71c76a12013-03-05 20:37:46 +02002823 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2824 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002825 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2826 hdev->name, opcode);
2827 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002828 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002829 }
2830
2831 if (skb_queue_empty(&req->cmd_q))
2832 bt_cb(skb)->req.start = true;
2833
Johan Hedberg02350a72013-04-03 21:50:29 +03002834 bt_cb(skb)->req.event = event;
2835
Johan Hedberg71c76a12013-03-05 20:37:46 +02002836 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002837}
2838
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002839void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2840 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002841{
2842 hci_req_add_ev(req, opcode, plen, param, 0);
2843}
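
/* Illustrative sketch, not part of the original file: building and
 * running an asynchronous request with the helpers above. The reset
 * opcode and both function names are assumptions; the call pattern
 * follows hci_req_init(), hci_req_add() and hci_req_run() as defined
 * in this file.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static int example_reset_req(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	/* Splices the queued commands onto hdev->cmd_q and kicks
	 * cmd_work; returns -ENODATA for an empty request or req->err
	 * if a command could not be built.
	 */
	return hci_req_run(&req, example_req_complete);
}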
2844
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002846void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847{
2848 struct hci_command_hdr *hdr;
2849
2850 if (!hdev->sent_cmd)
2851 return NULL;
2852
2853 hdr = (void *) hdev->sent_cmd->data;
2854
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002855 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 return NULL;
2857
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002858 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859
2860 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2861}
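
/* Illustrative sketch, not part of the original file: a command
 * complete handler recovering the parameters it sent, in the style of
 * hci_event.c code. The opcode is the same assumed example as above;
 * NULL means the reply does not match the outstanding command.
 */
static void example_name_complete(struct hci_dev *hdev)
{
	struct hci_cp_write_local_name *sent;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	BT_DBG("%s local name %s confirmed", hdev->name, sent->name);
}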
2862
2863/* Send ACL data */
2864static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2865{
2866 struct hci_acl_hdr *hdr;
2867 int len = skb->len;
2868
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002869 skb_push(skb, HCI_ACL_HDR_SIZE);
2870 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002871 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002872 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2873 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874}
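
/* Worked example (illustrative): hci_handle_pack(0x002a, ACL_START)
 * yields 0x202a, i.e. the 12-bit connection handle in bits 0-11 and
 * the packet boundary/broadcast flags shifted into bits 12-15.
 */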
2875
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002876static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002877 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002879 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 struct hci_dev *hdev = conn->hdev;
2881 struct sk_buff *list;
2882
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002883 skb->len = skb_headlen(skb);
2884 skb->data_len = 0;
2885
2886 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002887
2888 switch (hdev->dev_type) {
2889 case HCI_BREDR:
2890 hci_add_acl_hdr(skb, conn->handle, flags);
2891 break;
2892 case HCI_AMP:
2893 hci_add_acl_hdr(skb, chan->handle, flags);
2894 break;
2895 default:
2896 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2897 return;
2898 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002899
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002900 list = skb_shinfo(skb)->frag_list;
2901 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902		/* Non-fragmented */
2903 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2904
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002905 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 } else {
2907 /* Fragmented */
2908 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2909
2910 skb_shinfo(skb)->frag_list = NULL;
2911
2912 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002913 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002915 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002916
2917 flags &= ~ACL_START;
2918 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919 do {
2920 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002921
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002923 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002924 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925
2926 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2927
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002928 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 } while (list);
2930
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002931 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002933}
2934
2935void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2936{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002937 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002938
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002939 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002940
2941 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002942
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002943 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002945 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946}
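
/* Illustrative sketch, not part of the original file: how L2CAP-style
 * code hands off an outgoing PDU. The chan pointer is assumed to come
 * from the caller's connection state; ACL_START marks the first
 * fragment, and ownership of the skb passes to the core.
 */
static void example_send_pdu(struct hci_chan *chan, struct sk_buff *skb)
{
	hci_send_acl(chan, skb, ACL_START);
}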
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947
2948/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002949void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950{
2951 struct hci_dev *hdev = conn->hdev;
2952 struct hci_sco_hdr hdr;
2953
2954 BT_DBG("%s len %d", hdev->name, skb->len);
2955
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002956 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 hdr.dlen = skb->len;
2958
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002959 skb_push(skb, HCI_SCO_HDR_SIZE);
2960 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002961 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962
2963 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002964 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002965
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002967 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969
2970/* ---- HCI TX task (outgoing data) ---- */
2971
2972/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002973static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2974 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975{
2976 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002977 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002978 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002980	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002982
2983 rcu_read_lock();
2984
2985 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002986 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002988
2989 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2990 continue;
2991
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 num++;
2993
2994 if (c->sent < min) {
2995 min = c->sent;
2996 conn = c;
2997 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002998
2999 if (hci_conn_num(hdev, type) == num)
3000 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001 }
3002
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003003 rcu_read_unlock();
3004
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003006 int cnt, q;
3007
3008 switch (conn->type) {
3009 case ACL_LINK:
3010 cnt = hdev->acl_cnt;
3011 break;
3012 case SCO_LINK:
3013 case ESCO_LINK:
3014 cnt = hdev->sco_cnt;
3015 break;
3016 case LE_LINK:
3017 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3018 break;
3019 default:
3020 cnt = 0;
3021 BT_ERR("Unknown link type");
3022 }
3023
3024 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025 *quote = q ? q : 1;
3026 } else
3027 *quote = 0;
3028
3029 BT_DBG("conn %p quote %d", conn, *quote);
3030 return conn;
3031}
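
/* Worked example (illustrative): with hdev->acl_cnt == 8 free buffers
 * and three ACL connections holding queued data, the connection with
 * the lowest ->sent count wins and gets a quote of 8 / 3 == 2 packets
 * before the scheduler re-evaluates.
 */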
3032
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003033static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034{
3035 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003036 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037
Ville Tervobae1f5d92011-02-10 22:38:53 -03003038 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003040 rcu_read_lock();
3041
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003043 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003044 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003045 BT_ERR("%s killing stalled connection %pMR",
3046 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003047 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 }
3049 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003050
3051 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052}
3053
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003054static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3055 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003056{
3057 struct hci_conn_hash *h = &hdev->conn_hash;
3058 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003059 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003060 struct hci_conn *conn;
3061 int cnt, q, conn_num = 0;
3062
3063 BT_DBG("%s", hdev->name);
3064
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003065 rcu_read_lock();
3066
3067 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003068 struct hci_chan *tmp;
3069
3070 if (conn->type != type)
3071 continue;
3072
3073 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3074 continue;
3075
3076 conn_num++;
3077
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003078 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003079 struct sk_buff *skb;
3080
3081 if (skb_queue_empty(&tmp->data_q))
3082 continue;
3083
3084 skb = skb_peek(&tmp->data_q);
3085 if (skb->priority < cur_prio)
3086 continue;
3087
3088 if (skb->priority > cur_prio) {
3089 num = 0;
3090 min = ~0;
3091 cur_prio = skb->priority;
3092 }
3093
3094 num++;
3095
3096 if (conn->sent < min) {
3097 min = conn->sent;
3098 chan = tmp;
3099 }
3100 }
3101
3102 if (hci_conn_num(hdev, type) == conn_num)
3103 break;
3104 }
3105
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003106 rcu_read_unlock();
3107
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003108 if (!chan)
3109 return NULL;
3110
3111 switch (chan->conn->type) {
3112 case ACL_LINK:
3113 cnt = hdev->acl_cnt;
3114 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003115 case AMP_LINK:
3116 cnt = hdev->block_cnt;
3117 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003118 case SCO_LINK:
3119 case ESCO_LINK:
3120 cnt = hdev->sco_cnt;
3121 break;
3122 case LE_LINK:
3123 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3124 break;
3125 default:
3126 cnt = 0;
3127 BT_ERR("Unknown link type");
3128 }
3129
3130 q = cnt / num;
3131 *quote = q ? q : 1;
3132 BT_DBG("chan %p quote %d", chan, *quote);
3133 return chan;
3134}
3135
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003136static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3137{
3138 struct hci_conn_hash *h = &hdev->conn_hash;
3139 struct hci_conn *conn;
3140 int num = 0;
3141
3142 BT_DBG("%s", hdev->name);
3143
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003144 rcu_read_lock();
3145
3146 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003147 struct hci_chan *chan;
3148
3149 if (conn->type != type)
3150 continue;
3151
3152 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3153 continue;
3154
3155 num++;
3156
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003157 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003158 struct sk_buff *skb;
3159
3160 if (chan->sent) {
3161 chan->sent = 0;
3162 continue;
3163 }
3164
3165 if (skb_queue_empty(&chan->data_q))
3166 continue;
3167
3168 skb = skb_peek(&chan->data_q);
3169 if (skb->priority >= HCI_PRIO_MAX - 1)
3170 continue;
3171
3172 skb->priority = HCI_PRIO_MAX - 1;
3173
3174 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003175 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003176 }
3177
3178 if (hci_conn_num(hdev, type) == num)
3179 break;
3180 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003181
3182 rcu_read_unlock();
3183
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003184}
3185
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003186static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3187{
3188 /* Calculate count of blocks used by this packet */
3189 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3190}
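
/* Worked example (illustrative): with hdev->block_len == 339 and a
 * 1021-byte ACL packet, skb->len - HCI_ACL_HDR_SIZE == 1017, so
 * DIV_ROUND_UP(1017, 339) == 3 data blocks are consumed.
 */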
3191
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003192static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003194 if (!test_bit(HCI_RAW, &hdev->flags)) {
3195		/* ACL tx timeout must be longer than the maximum
3196		 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003197 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003198 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003199 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003201}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003203static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003204{
3205 unsigned int cnt = hdev->acl_cnt;
3206 struct hci_chan *chan;
3207 struct sk_buff *skb;
3208 int quote;
3209
3210 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003211
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003212 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003213 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003214 u32 priority = (skb_peek(&chan->data_q))->priority;
3215 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003216 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003217 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003218
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003219 /* Stop if priority has changed */
3220 if (skb->priority < priority)
3221 break;
3222
3223 skb = skb_dequeue(&chan->data_q);
3224
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003225 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003226 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003227
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 hci_send_frame(skb);
3229 hdev->acl_last_tx = jiffies;
3230
3231 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003232 chan->sent++;
3233 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 }
3235 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003236
3237 if (cnt != hdev->acl_cnt)
3238 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239}
3240
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003241static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003242{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003243 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003244 struct hci_chan *chan;
3245 struct sk_buff *skb;
3246 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003247 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003248
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003249 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003250
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003251 BT_DBG("%s", hdev->name);
3252
3253 if (hdev->dev_type == HCI_AMP)
3254 type = AMP_LINK;
3255 else
3256 type = ACL_LINK;
3257
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003258 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003259 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003260 u32 priority = (skb_peek(&chan->data_q))->priority;
3261 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3262 int blocks;
3263
3264 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003265 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003266
3267 /* Stop if priority has changed */
3268 if (skb->priority < priority)
3269 break;
3270
3271 skb = skb_dequeue(&chan->data_q);
3272
3273 blocks = __get_blocks(hdev, skb);
3274 if (blocks > hdev->block_cnt)
3275 return;
3276
3277 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003278 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003279
3280 hci_send_frame(skb);
3281 hdev->acl_last_tx = jiffies;
3282
3283 hdev->block_cnt -= blocks;
3284 quote -= blocks;
3285
3286 chan->sent += blocks;
3287 chan->conn->sent += blocks;
3288 }
3289 }
3290
3291 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003292 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003293}
3294
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003295static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003296{
3297 BT_DBG("%s", hdev->name);
3298
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003299 /* No ACL link over BR/EDR controller */
3300 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3301 return;
3302
3303 /* No AMP link over AMP controller */
3304 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003305 return;
3306
3307 switch (hdev->flow_ctl_mode) {
3308 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3309 hci_sched_acl_pkt(hdev);
3310 break;
3311
3312 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3313 hci_sched_acl_blk(hdev);
3314 break;
3315 }
3316}
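
/* Illustrative note: a BR/EDR controller using
 * HCI_FLOW_CTL_MODE_PACKET_BASED is throttled by hdev->acl_cnt, while
 * a block-based (AMP) controller is throttled by hdev->block_cnt,
 * which is why the two schedulers above coexist.
 */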
3317
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003319static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320{
3321 struct hci_conn *conn;
3322 struct sk_buff *skb;
3323 int quote;
3324
3325 BT_DBG("%s", hdev->name);
3326
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003327 if (!hci_conn_num(hdev, SCO_LINK))
3328 return;
3329
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3331 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3332 BT_DBG("skb %p len %d", skb, skb->len);
3333 hci_send_frame(skb);
3334
3335 conn->sent++;
3336 if (conn->sent == ~0)
3337 conn->sent = 0;
3338 }
3339 }
3340}
3341
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003342static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003343{
3344 struct hci_conn *conn;
3345 struct sk_buff *skb;
3346 int quote;
3347
3348 BT_DBG("%s", hdev->name);
3349
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003350 if (!hci_conn_num(hdev, ESCO_LINK))
3351 return;
3352
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003353 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3354 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003355 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3356 BT_DBG("skb %p len %d", skb, skb->len);
3357 hci_send_frame(skb);
3358
3359 conn->sent++;
3360 if (conn->sent == ~0)
3361 conn->sent = 0;
3362 }
3363 }
3364}
3365
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003366static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003367{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003368 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003369 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003370 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003371
3372 BT_DBG("%s", hdev->name);
3373
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003374 if (!hci_conn_num(hdev, LE_LINK))
3375 return;
3376
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003377 if (!test_bit(HCI_RAW, &hdev->flags)) {
3378		/* LE tx timeout must be longer than the maximum
3379		 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003380 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003381 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003382 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003383 }
3384
3385 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003386 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003387 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003388 u32 priority = (skb_peek(&chan->data_q))->priority;
3389 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003390 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003391 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003392
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003393 /* Stop if priority has changed */
3394 if (skb->priority < priority)
3395 break;
3396
3397 skb = skb_dequeue(&chan->data_q);
3398
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003399 hci_send_frame(skb);
3400 hdev->le_last_tx = jiffies;
3401
3402 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003403 chan->sent++;
3404 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003405 }
3406 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003407
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003408 if (hdev->le_pkts)
3409 hdev->le_cnt = cnt;
3410 else
3411 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003412
3413 if (cnt != tmp)
3414 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003415}
3416
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003417static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003419 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420 struct sk_buff *skb;
3421
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003422 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003423 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424
Marcel Holtmann52de5992013-09-03 18:08:38 -07003425 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3426 /* Schedule queues and send stuff to HCI driver */
3427 hci_sched_acl(hdev);
3428 hci_sched_sco(hdev);
3429 hci_sched_esco(hdev);
3430 hci_sched_le(hdev);
3431 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003432
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433 /* Send next queued raw (unknown type) packet */
3434 while ((skb = skb_dequeue(&hdev->raw_q)))
3435 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436}
3437
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003438/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439
3440/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003441static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442{
3443 struct hci_acl_hdr *hdr = (void *) skb->data;
3444 struct hci_conn *conn;
3445 __u16 handle, flags;
3446
3447 skb_pull(skb, HCI_ACL_HDR_SIZE);
3448
3449 handle = __le16_to_cpu(hdr->handle);
3450 flags = hci_flags(handle);
3451 handle = hci_handle(handle);
3452
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003453 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003454 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003455
3456 hdev->stat.acl_rx++;
3457
3458 hci_dev_lock(hdev);
3459 conn = hci_conn_hash_lookup_handle(hdev, handle);
3460 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003461
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003463 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003464
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003466 l2cap_recv_acldata(conn, skb, flags);
3467 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003469 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003470 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 }
3472
3473 kfree_skb(skb);
3474}
3475
3476/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003477static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478{
3479 struct hci_sco_hdr *hdr = (void *) skb->data;
3480 struct hci_conn *conn;
3481 __u16 handle;
3482
3483 skb_pull(skb, HCI_SCO_HDR_SIZE);
3484
3485 handle = __le16_to_cpu(hdr->handle);
3486
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003487 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488
3489 hdev->stat.sco_rx++;
3490
3491 hci_dev_lock(hdev);
3492 conn = hci_conn_hash_lookup_handle(hdev, handle);
3493 hci_dev_unlock(hdev);
3494
3495 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003497 sco_recv_scodata(conn, skb);
3498 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003500 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003501 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502 }
3503
3504 kfree_skb(skb);
3505}
3506
Johan Hedberg9238f362013-03-05 20:37:48 +02003507static bool hci_req_is_complete(struct hci_dev *hdev)
3508{
3509 struct sk_buff *skb;
3510
3511 skb = skb_peek(&hdev->cmd_q);
3512 if (!skb)
3513 return true;
3514
3515 return bt_cb(skb)->req.start;
3516}
3517
Johan Hedberg42c6b122013-03-05 20:37:49 +02003518static void hci_resend_last(struct hci_dev *hdev)
3519{
3520 struct hci_command_hdr *sent;
3521 struct sk_buff *skb;
3522 u16 opcode;
3523
3524 if (!hdev->sent_cmd)
3525 return;
3526
3527 sent = (void *) hdev->sent_cmd->data;
3528 opcode = __le16_to_cpu(sent->opcode);
3529 if (opcode == HCI_OP_RESET)
3530 return;
3531
3532 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3533 if (!skb)
3534 return;
3535
3536 skb_queue_head(&hdev->cmd_q, skb);
3537 queue_work(hdev->workqueue, &hdev->cmd_work);
3538}
3539
Johan Hedberg9238f362013-03-05 20:37:48 +02003540void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3541{
3542 hci_req_complete_t req_complete = NULL;
3543 struct sk_buff *skb;
3544 unsigned long flags;
3545
3546 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3547
Johan Hedberg42c6b122013-03-05 20:37:49 +02003548 /* If the completed command doesn't match the last one that was
3549 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003550 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003551 if (!hci_sent_cmd_data(hdev, opcode)) {
3552 /* Some CSR based controllers generate a spontaneous
3553 * reset complete event during init and any pending
3554 * command will never be completed. In such a case we
3555 * need to resend whatever was the last sent
3556 * command.
3557 */
3558 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3559 hci_resend_last(hdev);
3560
Johan Hedberg9238f362013-03-05 20:37:48 +02003561 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003562 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003563
3564 /* If the command succeeded and there's still more commands in
3565 * this request the request is not yet complete.
3566 */
3567 if (!status && !hci_req_is_complete(hdev))
3568 return;
3569
3570 /* If this was the last command in a request the complete
3571 * callback would be found in hdev->sent_cmd instead of the
3572 * command queue (hdev->cmd_q).
3573 */
3574 if (hdev->sent_cmd) {
3575 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003576
3577 if (req_complete) {
3578 /* We must set the complete callback to NULL to
3579 * avoid calling the callback more than once if
3580 * this function gets called again.
3581 */
3582 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3583
Johan Hedberg9238f362013-03-05 20:37:48 +02003584 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003585 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003586 }
3587
3588 /* Remove all pending commands belonging to this request */
3589 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3590 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3591 if (bt_cb(skb)->req.start) {
3592 __skb_queue_head(&hdev->cmd_q, skb);
3593 break;
3594 }
3595
3596 req_complete = bt_cb(skb)->req.complete;
3597 kfree_skb(skb);
3598 }
3599 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3600
3601call_complete:
3602 if (req_complete)
3603 req_complete(hdev, status);
3604}
3605
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003606static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003608 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609 struct sk_buff *skb;
3610
3611 BT_DBG("%s", hdev->name);
3612
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003614 /* Send copy to monitor */
3615 hci_send_to_monitor(hdev, skb);
3616
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 if (atomic_read(&hdev->promisc)) {
3618 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003619 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 }
3621
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003622 if (test_bit(HCI_RAW, &hdev->flags) ||
3623 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624 kfree_skb(skb);
3625 continue;
3626 }
3627
3628 if (test_bit(HCI_INIT, &hdev->flags)) {
3629			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003630 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631 case HCI_ACLDATA_PKT:
3632 case HCI_SCODATA_PKT:
3633 kfree_skb(skb);
3634 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003635 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636 }
3637
3638 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003639 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003641 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003642 hci_event_packet(hdev, skb);
3643 break;
3644
3645 case HCI_ACLDATA_PKT:
3646 BT_DBG("%s ACL data packet", hdev->name);
3647 hci_acldata_packet(hdev, skb);
3648 break;
3649
3650 case HCI_SCODATA_PKT:
3651 BT_DBG("%s SCO data packet", hdev->name);
3652 hci_scodata_packet(hdev, skb);
3653 break;
3654
3655 default:
3656 kfree_skb(skb);
3657 break;
3658 }
3659 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660}
3661
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003662static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003664 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003665 struct sk_buff *skb;
3666
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003667 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3668 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003671 if (atomic_read(&hdev->cmd_cnt)) {
3672 skb = skb_dequeue(&hdev->cmd_q);
3673 if (!skb)
3674 return;
3675
Wei Yongjun7585b972009-02-25 18:29:52 +08003676 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003678 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003679 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 atomic_dec(&hdev->cmd_cnt);
3681 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003682 if (test_bit(HCI_RESET, &hdev->flags))
3683 del_timer(&hdev->cmd_timer);
3684 else
3685 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003686 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687 } else {
3688 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003689 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690 }
3691 }
3692}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003693
Andre Guedes31f79562012-04-24 21:02:53 -03003694u8 bdaddr_to_le(u8 bdaddr_type)
3695{
3696 switch (bdaddr_type) {
3697 case BDADDR_LE_PUBLIC:
3698 return ADDR_LE_DEV_PUBLIC;
3699
3700 default:
3701 /* Fallback to LE Random address type */
3702 return ADDR_LE_DEV_RANDOM;
3703 }
3704}
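
/* Illustrative example: bdaddr_to_le(BDADDR_LE_RANDOM) returns
 * ADDR_LE_DEV_RANDOM through the fallback branch, so only
 * BDADDR_LE_PUBLIC maps to a public LE address type.
 */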