blob: c53f7f9c630ae9c4ec1912698ee375482f3d9aeb [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
/* Registered HCI devices; protected by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* Registered HCI callbacks; protected by hci_cb_list_lock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering — IDA handing out the hci%d index values */
static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Forward a device state event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
Johan Hedberg42c6b122013-03-05 20:37:49 +020060static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070061{
Johan Hedberg42c6b122013-03-05 20:37:49 +020062 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
/* Take ownership of the last received event (hdev->recv_evt) and verify
 * it matches the synchronous command identified by @opcode, or the
 * explicit @event when one was requested.
 *
 * Returns the skb with its event header pulled on success. On a missing,
 * short or mismatching event the skb is freed and ERR_PTR(-ENODATA) is
 * returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Atomically detach the stashed event so nobody else can use it */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event: only the event code must match */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	/* Strip the cmd_complete header and check the echoed opcode */
	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
/* Send a single HCI command synchronously and wait for its result.
 *
 * The command is queued as a one-entry request; the caller sleeps
 * interruptibly until hci_req_sync_complete() fires or @timeout
 * (in jiffies) expires.
 *
 * @event: when non-zero, wait for this specific event instead of the
 *         standard Command Complete.
 *
 * Returns the received event skb or an ERR_PTR() on failure.
 * NOTE(review): on -EINTR this returns without clearing
 * hdev->req_status — presumably callers serialize via hci_req_lock();
 * confirm.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	/* Queue the single command, tagged with its terminating event */
	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller answered; map HCI status to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* Aborted; req_result holds a positive errno */
		err = -hdev->req_result;
		break;

	default:
		/* Still pending: the wait timed out */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	/* Hand back the event stashed by the RX path in hdev->recv_evt */
	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * standard Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
/* Execute request and wait for completion.
 *
 * @func builds the request (queues zero or more commands); this runs it
 * and sleeps interruptibly until hci_req_sync_complete() fires or
 * @timeout (jiffies) expires. Must be called with hci_req_lock() held.
 *
 * Returns 0 on success or a negative errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its commands into the request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Convert the HCI status code into a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Wait expired without a completion */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
Johan Hedberg01178cd2013-03-05 20:37:41 +0200262static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200263 void (*req)(struct hci_request *req,
264 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200265 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266{
267 int ret;
268
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
271
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 /* Serialize all requests */
273 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200274 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 hci_req_unlock(hdev);
276
277 return ret;
278}
279
Johan Hedberg42c6b122013-03-05 20:37:49 +0200280static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200282 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283
284 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287}
288
Johan Hedberg42c6b122013-03-05 20:37:49 +0200289static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200292
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200296 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200298
299 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301}
302
Johan Hedberg42c6b122013-03-05 20:37:49 +0200303static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200304{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200306
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200307 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300309
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700310 /* Read Local Supported Commands */
311 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
312
313 /* Read Local Supported Features */
314 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
315
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300316 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200317 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300318
319 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200320 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700321
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700322 /* Read Flow Control Mode */
323 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
324
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700325 /* Read Location Data */
326 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200327}
328
Johan Hedberg42c6b122013-03-05 20:37:49 +0200329static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200330{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200331 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200332
333 BT_DBG("%s %ld", hdev->name, opt);
334
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300335 /* Reset */
336 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200337 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300338
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200339 switch (hdev->dev_type) {
340 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200341 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200342 break;
343
344 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200345 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200346 break;
347
348 default:
349 BT_ERR("Unknown device type %d", hdev->dev_type);
350 break;
351 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200352}
353
Johan Hedberg42c6b122013-03-05 20:37:49 +0200354static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200355{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -0700356 struct hci_dev *hdev = req->hdev;
357
Johan Hedberg2177bab2013-03-05 20:37:43 +0200358 __le16 param;
359 __u8 flt_type;
360
361 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200362 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200363
364 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200365 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200366
367 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200368 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200369
370 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200371 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200372
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700373 /* Read Number of Supported IAC */
374 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
375
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700376 /* Read Current IAC LAP */
377 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
378
Johan Hedberg2177bab2013-03-05 20:37:43 +0200379 /* Clear Event Filters */
380 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200381 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200382
383 /* Connection accept timeout ~20 secs */
384 param = __constant_cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200385 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200386
Marcel Holtmann4ca048e2013-10-11 16:42:07 -0700387 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
388 * but it does not support page scan related HCI commands.
389 */
390 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -0500391 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
392 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
393 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200394}
395
Johan Hedberg42c6b122013-03-05 20:37:49 +0200396static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200397{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300398 struct hci_dev *hdev = req->hdev;
399
Johan Hedberg2177bab2013-03-05 20:37:43 +0200400 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200401 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200402
403 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200404 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200405
406 /* Read LE Advertising Channel TX Power */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200407 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200408
409 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200410 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200411
412 /* Read LE Supported States */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200413 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +0300414
415 /* LE-only controllers have LE implicitly enabled */
416 if (!lmp_bredr_capable(hdev))
417 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200418}
419
420static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
421{
422 if (lmp_ext_inq_capable(hdev))
423 return 0x02;
424
425 if (lmp_inq_rssi_capable(hdev))
426 return 0x01;
427
428 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
429 hdev->lmp_subver == 0x0757)
430 return 0x01;
431
432 if (hdev->manufacturer == 15) {
433 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
434 return 0x01;
435 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
436 return 0x01;
437 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
438 return 0x01;
439 }
440
441 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
442 hdev->lmp_subver == 0x1805)
443 return 0x01;
444
445 return 0x00;
446}
447
Johan Hedberg42c6b122013-03-05 20:37:49 +0200448static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200449{
450 u8 mode;
451
Johan Hedberg42c6b122013-03-05 20:37:49 +0200452 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200453
Johan Hedberg42c6b122013-03-05 20:37:49 +0200454 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200455}
456
/* Build and send the HCI event mask (and the LE event mask when the
 * controller is LE capable) based on the supported LMP features.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE event mask: 0x1f enables the low five meta-event bits —
	 * presumably the first five LE meta events; confirm against the
	 * core spec's LE Set Event Mask definition.
	 */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
537
/* Second stage of controller init: BR/EDR and LE basic setup, event
 * mask, SSP/EIR configuration and extended feature reads.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		/* No BR/EDR support: make sure the enabled flag is clear */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear the cached EIR data and wipe
			 * the EIR stored in the controller.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		/* Read extended features page 1 */
		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
593
Johan Hedberg42c6b122013-03-05 20:37:49 +0200594static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200595{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200596 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200597 struct hci_cp_write_def_link_policy cp;
598 u16 link_policy = 0;
599
600 if (lmp_rswitch_capable(hdev))
601 link_policy |= HCI_LP_RSWITCH;
602 if (lmp_hold_capable(hdev))
603 link_policy |= HCI_LP_HOLD;
604 if (lmp_sniff_capable(hdev))
605 link_policy |= HCI_LP_SNIFF;
606 if (lmp_park_capable(hdev))
607 link_policy |= HCI_LP_PARK;
608
609 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200610 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200611}
612
Johan Hedberg42c6b122013-03-05 20:37:49 +0200613static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200614{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200615 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200616 struct hci_cp_write_le_host_supported cp;
617
Johan Hedbergc73eee92013-04-19 18:35:21 +0300618 /* LE-only devices do not support explicit enablement */
619 if (!lmp_bredr_capable(hdev))
620 return;
621
Johan Hedberg2177bab2013-03-05 20:37:43 +0200622 memset(&cp, 0, sizeof(cp));
623
624 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
625 cp.le = 0x01;
626 cp.simul = lmp_le_br_capable(hdev);
627 }
628
629 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200630 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
631 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200632}
633
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300634static void hci_set_event_mask_page_2(struct hci_request *req)
635{
636 struct hci_dev *hdev = req->hdev;
637 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
638
639 /* If Connectionless Slave Broadcast master role is supported
640 * enable all necessary events for it.
641 */
642 if (hdev->features[2][0] & 0x01) {
643 events[1] |= 0x40; /* Triggered Clock Capture */
644 events[1] |= 0x80; /* Synchronization Train Complete */
645 events[2] |= 0x10; /* Slave Page Response Timeout */
646 events[2] |= 0x20; /* CSB Channel Map Change */
647 }
648
649 /* If Connectionless Slave Broadcast slave role is supported
650 * enable all necessary events for it.
651 */
652 if (hdev->features[2][0] & 0x02) {
653 events[2] |= 0x01; /* Synchronization Train Received */
654 events[2] |= 0x02; /* CSB Receive */
655 events[2] |= 0x04; /* CSB Timeout */
656 events[2] |= 0x08; /* Truncated Page Complete */
657 }
658
659 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
660}
661
/* Third stage of controller init: commands that depend on the results
 * of stage two (supported-commands bitmask, extended feature pages).
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Only set a default link policy when the write command is
	 * supported (commands[5] bit 4).
	 */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
702
Johan Hedberg5d4e7e82013-09-13 11:40:01 +0300703static void hci_init4_req(struct hci_request *req, unsigned long opt)
704{
705 struct hci_dev *hdev = req->hdev;
706
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300707 /* Set event mask page 2 if the HCI command for it is supported */
708 if (hdev->commands[22] & 0x04)
709 hci_set_event_mask_page_2(req);
710
Johan Hedberg5d4e7e82013-09-13 11:40:01 +0300711 /* Check for Synchronization Train support */
712 if (hdev->features[2][0] & 0x04)
713 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
714}
715
/* Run the staged init sequence for a controller. AMP controllers only
 * need stage one; BR/EDR/LE controllers run all four stages.
 *
 * Returns 0 on success or the negative errno of the first failing
 * stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
741
Johan Hedberg42c6b122013-03-05 20:37:49 +0200742static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743{
744 __u8 scan = opt;
745
Johan Hedberg42c6b122013-03-05 20:37:49 +0200746 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747
748 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200749 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750}
751
Johan Hedberg42c6b122013-03-05 20:37:49 +0200752static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753{
754 __u8 auth = opt;
755
Johan Hedberg42c6b122013-03-05 20:37:49 +0200756 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757
758 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200759 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700760}
761
Johan Hedberg42c6b122013-03-05 20:37:49 +0200762static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763{
764 __u8 encrypt = opt;
765
Johan Hedberg42c6b122013-03-05 20:37:49 +0200766 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200768 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200769 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770}
771
Johan Hedberg42c6b122013-03-05 20:37:49 +0200772static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200773{
774 __le16 policy = cpu_to_le16(opt);
775
Johan Hedberg42c6b122013-03-05 20:37:49 +0200776 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200777
778 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200779 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200780}
781
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900782/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 * Device is held on return. */
784struct hci_dev *hci_dev_get(int index)
785{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200786 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787
788 BT_DBG("%d", index);
789
790 if (index < 0)
791 return NULL;
792
793 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200794 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 if (d->id == index) {
796 hdev = hci_dev_hold(d);
797 break;
798 }
799 }
800 read_unlock(&hci_dev_list_lock);
801 return hdev;
802}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803
804/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200805
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200806bool hci_discovery_active(struct hci_dev *hdev)
807{
808 struct discovery_state *discov = &hdev->discovery;
809
Andre Guedes6fbe1952012-02-03 17:47:58 -0300810 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300811 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300812 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200813 return true;
814
Andre Guedes6fbe1952012-02-03 17:47:58 -0300815 default:
816 return false;
817 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200818}
819
Johan Hedbergff9ef572012-01-04 14:23:45 +0200820void hci_discovery_set_state(struct hci_dev *hdev, int state)
821{
822 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
823
824 if (hdev->discovery.state == state)
825 return;
826
827 switch (state) {
828 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300829 if (hdev->discovery.state != DISCOVERY_STARTING)
830 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200831 break;
832 case DISCOVERY_STARTING:
833 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300834 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200835 mgmt_discovering(hdev, 1);
836 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200837 case DISCOVERY_RESOLVING:
838 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200839 case DISCOVERY_STOPPING:
840 break;
841 }
842
843 hdev->discovery.state = state;
844}
845
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300846void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847{
Johan Hedberg30883512012-01-04 14:16:21 +0200848 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200849 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700850
Johan Hedberg561aafb2012-01-04 13:31:59 +0200851 list_for_each_entry_safe(p, n, &cache->all, all) {
852 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200853 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200855
856 INIT_LIST_HEAD(&cache->unknown);
857 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858}
859
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300860struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
861 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862{
Johan Hedberg30883512012-01-04 14:16:21 +0200863 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864 struct inquiry_entry *e;
865
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300866 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867
Johan Hedberg561aafb2012-01-04 13:31:59 +0200868 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200870 return e;
871 }
872
873 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874}
875
Johan Hedberg561aafb2012-01-04 13:31:59 +0200876struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300877 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200878{
Johan Hedberg30883512012-01-04 14:16:21 +0200879 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200880 struct inquiry_entry *e;
881
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300882 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200883
884 list_for_each_entry(e, &cache->unknown, list) {
885 if (!bacmp(&e->data.bdaddr, bdaddr))
886 return e;
887 }
888
889 return NULL;
890}
891
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200892struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300893 bdaddr_t *bdaddr,
894 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200895{
896 struct discovery_state *cache = &hdev->discovery;
897 struct inquiry_entry *e;
898
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300899 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200900
901 list_for_each_entry(e, &cache->resolve, list) {
902 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
903 return e;
904 if (!bacmp(&e->data.bdaddr, bdaddr))
905 return e;
906 }
907
908 return NULL;
909}
910
Johan Hedberga3d4e202012-01-09 00:53:02 +0200911void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300912 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200913{
914 struct discovery_state *cache = &hdev->discovery;
915 struct list_head *pos = &cache->resolve;
916 struct inquiry_entry *p;
917
918 list_del(&ie->list);
919
920 list_for_each_entry(p, &cache->resolve, list) {
921 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300922 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200923 break;
924 pos = &p->list;
925 }
926
927 list_add(&ie->list, pos);
928}
929
Johan Hedberg31754052012-01-04 13:39:52 +0200930bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300931 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700932{
Johan Hedberg30883512012-01-04 14:16:21 +0200933 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200934 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700935
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300936 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937
Szymon Janc2b2fec42012-11-20 11:38:54 +0100938 hci_remove_remote_oob_data(hdev, &data->bdaddr);
939
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200940 if (ssp)
941 *ssp = data->ssp_mode;
942
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200943 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200944 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200945 if (ie->data.ssp_mode && ssp)
946 *ssp = true;
947
Johan Hedberga3d4e202012-01-09 00:53:02 +0200948 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300949 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +0200950 ie->data.rssi = data->rssi;
951 hci_inquiry_cache_update_resolve(hdev, ie);
952 }
953
Johan Hedberg561aafb2012-01-04 13:31:59 +0200954 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200955 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200956
Johan Hedberg561aafb2012-01-04 13:31:59 +0200957 /* Entry not in the cache. Add new one. */
958 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
959 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200960 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200961
962 list_add(&ie->all, &cache->all);
963
964 if (name_known) {
965 ie->name_state = NAME_KNOWN;
966 } else {
967 ie->name_state = NAME_NOT_KNOWN;
968 list_add(&ie->list, &cache->unknown);
969 }
970
971update:
972 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300973 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +0200974 ie->name_state = NAME_KNOWN;
975 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976 }
977
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200978 memcpy(&ie->data, data, sizeof(*data));
979 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200981
982 if (ie->name_state == NAME_NOT_KNOWN)
983 return false;
984
985 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986}
987
988static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
989{
Johan Hedberg30883512012-01-04 14:16:21 +0200990 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991 struct inquiry_info *info = (struct inquiry_info *) buf;
992 struct inquiry_entry *e;
993 int copied = 0;
994
Johan Hedberg561aafb2012-01-04 13:31:59 +0200995 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200997
998 if (copied >= num)
999 break;
1000
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001 bacpy(&info->bdaddr, &data->bdaddr);
1002 info->pscan_rep_mode = data->pscan_rep_mode;
1003 info->pscan_period_mode = data->pscan_period_mode;
1004 info->pscan_mode = data->pscan_mode;
1005 memcpy(info->dev_class, data->dev_class, 3);
1006 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001007
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001009 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001010 }
1011
1012 BT_DBG("cache %p, copied %d", cache, copied);
1013 return copied;
1014}
1015
Johan Hedberg42c6b122013-03-05 20:37:49 +02001016static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017{
1018 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001019 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020 struct hci_cp_inquiry cp;
1021
1022 BT_DBG("%s", hdev->name);
1023
1024 if (test_bit(HCI_INQUIRY, &hdev->flags))
1025 return;
1026
1027 /* Start Inquiry */
1028 memcpy(&cp.lap, &ir->lap, 3);
1029 cp.length = ir->length;
1030 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001031 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032}
1033
Andre Guedes3e13fa12013-03-27 20:04:56 -03001034static int wait_inquiry(void *word)
1035{
1036 schedule();
1037 return signal_pending(current);
1038}
1039
Linus Torvalds1da177e2005-04-16 15:20:36 -07001040int hci_inquiry(void __user *arg)
1041{
1042 __u8 __user *ptr = arg;
1043 struct hci_inquiry_req ir;
1044 struct hci_dev *hdev;
1045 int err = 0, do_inquiry = 0, max_rsp;
1046 long timeo;
1047 __u8 *buf;
1048
1049 if (copy_from_user(&ir, ptr, sizeof(ir)))
1050 return -EFAULT;
1051
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001052 hdev = hci_dev_get(ir.dev_id);
1053 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 return -ENODEV;
1055
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001056 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1057 err = -EBUSY;
1058 goto done;
1059 }
1060
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001061 if (hdev->dev_type != HCI_BREDR) {
1062 err = -EOPNOTSUPP;
1063 goto done;
1064 }
1065
Johan Hedberg56f87902013-10-02 13:43:13 +03001066 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1067 err = -EOPNOTSUPP;
1068 goto done;
1069 }
1070
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001071 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001072 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001073 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001074 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 do_inquiry = 1;
1076 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001077 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078
Marcel Holtmann04837f62006-07-03 10:02:33 +02001079 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001080
1081 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001082 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1083 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001084 if (err < 0)
1085 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001086
1087 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1088 * cleared). If it is interrupted by a signal, return -EINTR.
1089 */
1090 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1091 TASK_INTERRUPTIBLE))
1092 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001093 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001095 /* for unlimited number of responses we will use buffer with
1096 * 255 entries
1097 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1099
1100 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1101 * copy it to the user space.
1102 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001103 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001104 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 err = -ENOMEM;
1106 goto done;
1107 }
1108
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001109 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001111 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112
1113 BT_DBG("num_rsp %d", ir.num_rsp);
1114
1115 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1116 ptr += sizeof(ir);
1117 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001118 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001120 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 err = -EFAULT;
1122
1123 kfree(buf);
1124
1125done:
1126 hci_dev_put(hdev);
1127 return err;
1128}
1129
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001130static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1131{
1132 u8 ad_len = 0, flags = 0;
1133 size_t name_len;
1134
Johan Hedbergf3d3444a2013-10-05 12:01:04 +02001135 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001136 flags |= LE_AD_GENERAL;
1137
Johan Hedberg11802b22013-10-02 16:02:24 +03001138 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1139 if (lmp_le_br_capable(hdev))
1140 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1141 if (lmp_host_le_br_capable(hdev))
1142 flags |= LE_AD_SIM_LE_BREDR_HOST;
1143 } else {
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001144 flags |= LE_AD_NO_BREDR;
Johan Hedberg11802b22013-10-02 16:02:24 +03001145 }
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001146
1147 if (flags) {
1148 BT_DBG("adv flags 0x%02x", flags);
1149
1150 ptr[0] = 2;
1151 ptr[1] = EIR_FLAGS;
1152 ptr[2] = flags;
1153
1154 ad_len += 3;
1155 ptr += 3;
1156 }
1157
1158 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1159 ptr[0] = 2;
1160 ptr[1] = EIR_TX_POWER;
1161 ptr[2] = (u8) hdev->adv_tx_power;
1162
1163 ad_len += 3;
1164 ptr += 3;
1165 }
1166
1167 name_len = strlen(hdev->dev_name);
1168 if (name_len > 0) {
1169 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1170
1171 if (name_len > max_len) {
1172 name_len = max_len;
1173 ptr[1] = EIR_NAME_SHORT;
1174 } else
1175 ptr[1] = EIR_NAME_COMPLETE;
1176
1177 ptr[0] = name_len + 1;
1178
1179 memcpy(ptr + 2, hdev->dev_name, name_len);
1180
1181 ad_len += (name_len + 2);
1182 ptr += (name_len + 2);
1183 }
1184
1185 return ad_len;
1186}
1187
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001188void hci_update_ad(struct hci_request *req)
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001189{
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001190 struct hci_dev *hdev = req->hdev;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001191 struct hci_cp_le_set_adv_data cp;
1192 u8 len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001193
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001194 if (!lmp_le_capable(hdev))
1195 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001196
1197 memset(&cp, 0, sizeof(cp));
1198
1199 len = create_ad(hdev, cp.data);
1200
1201 if (hdev->adv_data_len == len &&
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001202 memcmp(cp.data, hdev->adv_data, len) == 0)
1203 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001204
1205 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1206 hdev->adv_data_len = len;
1207
1208 cp.length = len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001209
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001210 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001211}
1212
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001213static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 int ret = 0;
1216
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 BT_DBG("%s %p", hdev->name, hdev);
1218
1219 hci_req_lock(hdev);
1220
Johan Hovold94324962012-03-15 14:48:41 +01001221 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1222 ret = -ENODEV;
1223 goto done;
1224 }
1225
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001226 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1227 /* Check for rfkill but allow the HCI setup stage to
1228 * proceed (which in itself doesn't cause any RF activity).
1229 */
1230 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1231 ret = -ERFKILL;
1232 goto done;
1233 }
1234
1235 /* Check for valid public address or a configured static
1236 * random adddress, but let the HCI setup proceed to
1237 * be able to determine if there is a public address
1238 * or not.
1239 *
1240 * This check is only valid for BR/EDR controllers
1241 * since AMP controllers do not have an address.
1242 */
1243 if (hdev->dev_type == HCI_BREDR &&
1244 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1245 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1246 ret = -EADDRNOTAVAIL;
1247 goto done;
1248 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001249 }
1250
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251 if (test_bit(HCI_UP, &hdev->flags)) {
1252 ret = -EALREADY;
1253 goto done;
1254 }
1255
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 if (hdev->open(hdev)) {
1257 ret = -EIO;
1258 goto done;
1259 }
1260
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001261 atomic_set(&hdev->cmd_cnt, 1);
1262 set_bit(HCI_INIT, &hdev->flags);
1263
1264 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1265 ret = hdev->setup(hdev);
1266
1267 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001268 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1269 set_bit(HCI_RAW, &hdev->flags);
1270
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001271 if (!test_bit(HCI_RAW, &hdev->flags) &&
1272 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001273 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274 }
1275
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001276 clear_bit(HCI_INIT, &hdev->flags);
1277
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 if (!ret) {
1279 hci_dev_hold(hdev);
1280 set_bit(HCI_UP, &hdev->flags);
1281 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001282 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001283 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001284 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001285 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001286 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001287 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001288 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001289 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001291 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001292 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001293 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294
1295 skb_queue_purge(&hdev->cmd_q);
1296 skb_queue_purge(&hdev->rx_q);
1297
1298 if (hdev->flush)
1299 hdev->flush(hdev);
1300
1301 if (hdev->sent_cmd) {
1302 kfree_skb(hdev->sent_cmd);
1303 hdev->sent_cmd = NULL;
1304 }
1305
1306 hdev->close(hdev);
1307 hdev->flags = 0;
1308 }
1309
1310done:
1311 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 return ret;
1313}
1314
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001315/* ---- HCI ioctl helpers ---- */
1316
1317int hci_dev_open(__u16 dev)
1318{
1319 struct hci_dev *hdev;
1320 int err;
1321
1322 hdev = hci_dev_get(dev);
1323 if (!hdev)
1324 return -ENODEV;
1325
Johan Hedberge1d08f42013-10-01 22:44:50 +03001326 /* We need to ensure that no other power on/off work is pending
1327 * before proceeding to call hci_dev_do_open. This is
1328 * particularly important if the setup procedure has not yet
1329 * completed.
1330 */
1331 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1332 cancel_delayed_work(&hdev->power_off);
1333
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001334 /* After this call it is guaranteed that the setup procedure
1335 * has finished. This means that error conditions like RFKILL
1336 * or no valid public or static random address apply.
1337 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001338 flush_workqueue(hdev->req_workqueue);
1339
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001340 err = hci_dev_do_open(hdev);
1341
1342 hci_dev_put(hdev);
1343
1344 return err;
1345}
1346
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347static int hci_dev_do_close(struct hci_dev *hdev)
1348{
1349 BT_DBG("%s %p", hdev->name, hdev);
1350
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001351 cancel_delayed_work(&hdev->power_off);
1352
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353 hci_req_cancel(hdev, ENODEV);
1354 hci_req_lock(hdev);
1355
1356 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001357 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 hci_req_unlock(hdev);
1359 return 0;
1360 }
1361
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001362 /* Flush RX and TX works */
1363 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001364 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001366 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001367 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001368 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001369 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001370 }
1371
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001372 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001373 cancel_delayed_work(&hdev->service_cache);
1374
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001375 cancel_delayed_work_sync(&hdev->le_scan_disable);
1376
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001377 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001378 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001380 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
1382 hci_notify(hdev, HCI_DEV_DOWN);
1383
1384 if (hdev->flush)
1385 hdev->flush(hdev);
1386
1387 /* Reset device */
1388 skb_queue_purge(&hdev->cmd_q);
1389 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001390 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07001391 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001392 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001394 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 clear_bit(HCI_INIT, &hdev->flags);
1396 }
1397
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001398 /* flush cmd work */
1399 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400
1401 /* Drop queues */
1402 skb_queue_purge(&hdev->rx_q);
1403 skb_queue_purge(&hdev->cmd_q);
1404 skb_queue_purge(&hdev->raw_q);
1405
1406 /* Drop last sent command */
1407 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001408 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 kfree_skb(hdev->sent_cmd);
1410 hdev->sent_cmd = NULL;
1411 }
1412
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001413 kfree_skb(hdev->recv_evt);
1414 hdev->recv_evt = NULL;
1415
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 /* After this point our queues are empty
1417 * and no tasks are scheduled. */
1418 hdev->close(hdev);
1419
Johan Hedberg35b973c2013-03-15 17:06:59 -05001420 /* Clear flags */
1421 hdev->flags = 0;
1422 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1423
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001424 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1425 if (hdev->dev_type == HCI_BREDR) {
1426 hci_dev_lock(hdev);
1427 mgmt_powered(hdev, 0);
1428 hci_dev_unlock(hdev);
1429 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001430 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001431
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001432 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001433 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001434
Johan Hedberge59fda82012-02-22 18:11:53 +02001435 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001436 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001437
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 hci_req_unlock(hdev);
1439
1440 hci_dev_put(hdev);
1441 return 0;
1442}
1443
1444int hci_dev_close(__u16 dev)
1445{
1446 struct hci_dev *hdev;
1447 int err;
1448
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001449 hdev = hci_dev_get(dev);
1450 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001452
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001453 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1454 err = -EBUSY;
1455 goto done;
1456 }
1457
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001458 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1459 cancel_delayed_work(&hdev->power_off);
1460
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001462
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001463done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 hci_dev_put(hdev);
1465 return err;
1466}
1467
1468int hci_dev_reset(__u16 dev)
1469{
1470 struct hci_dev *hdev;
1471 int ret = 0;
1472
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001473 hdev = hci_dev_get(dev);
1474 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 return -ENODEV;
1476
1477 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
Marcel Holtmann808a0492013-08-26 20:57:58 -07001479 if (!test_bit(HCI_UP, &hdev->flags)) {
1480 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001482 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001484 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1485 ret = -EBUSY;
1486 goto done;
1487 }
1488
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 /* Drop queues */
1490 skb_queue_purge(&hdev->rx_q);
1491 skb_queue_purge(&hdev->cmd_q);
1492
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001493 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001494 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001496 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497
1498 if (hdev->flush)
1499 hdev->flush(hdev);
1500
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001501 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001502 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503
1504 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001505 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506
1507done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 hci_req_unlock(hdev);
1509 hci_dev_put(hdev);
1510 return ret;
1511}
1512
1513int hci_dev_reset_stat(__u16 dev)
1514{
1515 struct hci_dev *hdev;
1516 int ret = 0;
1517
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001518 hdev = hci_dev_get(dev);
1519 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 return -ENODEV;
1521
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001522 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1523 ret = -EBUSY;
1524 goto done;
1525 }
1526
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1528
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001529done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 return ret;
1532}
1533
/* Handle device-configuration ioctls (HCISETAUTH, HCISETSCAN, ...).
 *
 * Copies a struct hci_dev_req from userspace, resolves the target
 * device and either issues the matching synchronous HCI request or
 * updates the locally cached settings directly.  Returns 0 on success
 * or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Devices bound to a user channel are controlled exclusively
	 * from userspace; reject ioctl-based configuration.
	 */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These settings only apply to BR/EDR capable controllers */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs the MTU and the packet count as two
		 * 16-bit halves of the 32-bit value.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1624
/* Handle the HCIGETDEVLIST ioctl: report id and flags of every
 * registered HCI device back to userspace.
 *
 * The requested entry count is bounded so the kzalloc() below stays a
 * sane size; dl->dev_num is updated to the number actually filled in.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing the devices counts as userspace activity, so
		 * abort any pending auto power-off.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1671
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info with the
 * current state of one HCI device and copy it back to userspace.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace is taking an interest in this device, so cancel any
	 * pending auto power-off (synchronously, so the work cannot run
	 * after we return the info).
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Bus type in the low nibble, device type in the bits above it */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings through
	 * the ACL fields since the structure has no dedicated LE members.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1720
1721/* ---- Interface to HCI drivers ---- */
1722
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001723static int hci_rfkill_set_block(void *data, bool blocked)
1724{
1725 struct hci_dev *hdev = data;
1726
1727 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1728
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001729 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1730 return -EBUSY;
1731
Johan Hedberg5e130362013-09-13 08:58:17 +03001732 if (blocked) {
1733 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001734 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1735 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001736 } else {
1737 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001738 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001739
1740 return 0;
1741}
1742
/* Operations table registered with the rfkill core for each device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1746
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001747static void hci_power_on(struct work_struct *work)
1748{
1749 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001750 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001751
1752 BT_DBG("%s", hdev->name);
1753
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001754 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001755 if (err < 0) {
1756 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001757 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001758 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001759
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001760 /* During the HCI setup phase, a few error conditions are
1761 * ignored and they need to be checked now. If they are still
1762 * valid, it is important to turn the device back off.
1763 */
1764 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1765 (hdev->dev_type == HCI_BREDR &&
1766 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1767 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03001768 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1769 hci_dev_do_close(hdev);
1770 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02001771 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1772 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03001773 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001774
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001775 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001776 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001777}
1778
/* Work item: close the device on power-off (e.g. auto-off timeout) */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1788
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001789static void hci_discov_off(struct work_struct *work)
1790{
1791 struct hci_dev *hdev;
Marcel Holtmannb1e73122013-10-15 06:33:51 -07001792 struct hci_request req;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001793 u8 scan = SCAN_PAGE;
1794
1795 hdev = container_of(work, struct hci_dev, discov_off.work);
1796
1797 BT_DBG("%s", hdev->name);
1798
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001799 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001800
Marcel Holtmannb1e73122013-10-15 06:33:51 -07001801 hci_req_init(&req, hdev);
1802 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1803 hci_req_run(&req, NULL);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001804
1805 hdev->discov_timeout = 0;
1806
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001807 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001808}
1809
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001810int hci_uuids_clear(struct hci_dev *hdev)
1811{
Johan Hedberg48210022013-01-27 00:31:28 +02001812 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001813
Johan Hedberg48210022013-01-27 00:31:28 +02001814 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1815 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001816 kfree(uuid);
1817 }
1818
1819 return 0;
1820}
1821
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001822int hci_link_keys_clear(struct hci_dev *hdev)
1823{
1824 struct list_head *p, *n;
1825
1826 list_for_each_safe(p, n, &hdev->link_keys) {
1827 struct link_key *key;
1828
1829 key = list_entry(p, struct link_key, list);
1830
1831 list_del(p);
1832 kfree(key);
1833 }
1834
1835 return 0;
1836}
1837
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001838int hci_smp_ltks_clear(struct hci_dev *hdev)
1839{
1840 struct smp_ltk *k, *tmp;
1841
1842 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1843 list_del(&k->list);
1844 kfree(k);
1845 }
1846
1847 return 0;
1848}
1849
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001850struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1851{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001852 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001853
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001854 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001855 if (bacmp(bdaddr, &k->bdaddr) == 0)
1856 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001857
1858 return NULL;
1859}
1860
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301861static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001862 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001863{
1864 /* Legacy key */
1865 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301866 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001867
1868 /* Debug keys are insecure so don't store them persistently */
1869 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301870 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001871
1872 /* Changed combination key and there's no previous one */
1873 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301874 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001875
1876 /* Security mode 3 case */
1877 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301878 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001879
1880 /* Neither local nor remote side had no-bonding as requirement */
1881 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301882 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001883
1884 /* Local side had dedicated bonding as requirement */
1885 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301886 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001887
1888 /* Remote side had dedicated bonding as requirement */
1889 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301890 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001891
1892 /* If none of the above criteria match, then don't store the key
1893 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301894 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001895}
1896
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001897struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001898{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001899 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001900
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001901 list_for_each_entry(k, &hdev->long_term_keys, list) {
1902 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001903 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001904 continue;
1905
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001906 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001907 }
1908
1909 return NULL;
1910}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001911
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001912struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001913 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001914{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001915 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001916
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001917 list_for_each_entry(k, &hdev->long_term_keys, list)
1918 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001919 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001920 return k;
1921
1922 return NULL;
1923}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001924
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001925int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001926 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001927{
1928 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301929 u8 old_key_type;
1930 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001931
1932 old_key = hci_find_link_key(hdev, bdaddr);
1933 if (old_key) {
1934 old_key_type = old_key->type;
1935 key = old_key;
1936 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001937 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001938 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1939 if (!key)
1940 return -ENOMEM;
1941 list_add(&key->list, &hdev->link_keys);
1942 }
1943
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001944 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001945
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001946 /* Some buggy controller combinations generate a changed
1947 * combination key for legacy pairing even when there's no
1948 * previous key */
1949 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001950 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001951 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001952 if (conn)
1953 conn->key_type = type;
1954 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001955
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001956 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001957 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001958 key->pin_len = pin_len;
1959
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001960 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001961 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001962 else
1963 key->type = type;
1964
Johan Hedberg4df378a2011-04-28 11:29:03 -07001965 if (!new_key)
1966 return 0;
1967
1968 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1969
Johan Hedberg744cf192011-11-08 20:40:14 +02001970 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001971
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301972 if (conn)
1973 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001974
1975 return 0;
1976}
1977
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001978int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001979 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001980 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001981{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001982 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001983
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001984 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1985 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001986
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001987 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1988 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001989 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001990 else {
1991 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001992 if (!key)
1993 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001994 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001995 }
1996
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001997 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001998 key->bdaddr_type = addr_type;
1999 memcpy(key->val, tk, sizeof(key->val));
2000 key->authenticated = authenticated;
2001 key->ediv = ediv;
2002 key->enc_size = enc_size;
2003 key->type = type;
2004 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002005
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002006 if (!new_key)
2007 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002008
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002009 if (type & HCI_SMP_LTK)
2010 mgmt_new_ltk(hdev, key, 1);
2011
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002012 return 0;
2013}
2014
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002015int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2016{
2017 struct link_key *key;
2018
2019 key = hci_find_link_key(hdev, bdaddr);
2020 if (!key)
2021 return -ENOENT;
2022
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002023 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002024
2025 list_del(&key->list);
2026 kfree(key);
2027
2028 return 0;
2029}
2030
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002031int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2032{
2033 struct smp_ltk *k, *tmp;
2034
2035 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2036 if (bacmp(bdaddr, &k->bdaddr))
2037 continue;
2038
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002039 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002040
2041 list_del(&k->list);
2042 kfree(k);
2043 }
2044
2045 return 0;
2046}
2047
Ville Tervo6bd32322011-02-16 16:32:41 +02002048/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002049static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002050{
2051 struct hci_dev *hdev = (void *) arg;
2052
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002053 if (hdev->sent_cmd) {
2054 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2055 u16 opcode = __le16_to_cpu(sent->opcode);
2056
2057 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2058 } else {
2059 BT_ERR("%s command tx timeout", hdev->name);
2060 }
2061
Ville Tervo6bd32322011-02-16 16:32:41 +02002062 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002063 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002064}
2065
Szymon Janc2763eda2011-03-22 13:12:22 +01002066struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002067 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002068{
2069 struct oob_data *data;
2070
2071 list_for_each_entry(data, &hdev->remote_oob_data, list)
2072 if (bacmp(bdaddr, &data->bdaddr) == 0)
2073 return data;
2074
2075 return NULL;
2076}
2077
2078int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2079{
2080 struct oob_data *data;
2081
2082 data = hci_find_remote_oob_data(hdev, bdaddr);
2083 if (!data)
2084 return -ENOENT;
2085
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002086 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002087
2088 list_del(&data->list);
2089 kfree(data);
2090
2091 return 0;
2092}
2093
2094int hci_remote_oob_data_clear(struct hci_dev *hdev)
2095{
2096 struct oob_data *data, *n;
2097
2098 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2099 list_del(&data->list);
2100 kfree(data);
2101 }
2102
2103 return 0;
2104}
2105
/* Store (or refresh) out-of-band pairing data (hash + randomizer) for
 * a remote address.  Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* First data for this address - create a new entry */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2129
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002130struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002131{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002132 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002133
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002134 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002135 if (bacmp(bdaddr, &b->bdaddr) == 0)
2136 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002137
2138 return NULL;
2139}
2140
2141int hci_blacklist_clear(struct hci_dev *hdev)
2142{
2143 struct list_head *p, *n;
2144
2145 list_for_each_safe(p, n, &hdev->blacklist) {
2146 struct bdaddr_list *b;
2147
2148 b = list_entry(p, struct bdaddr_list, list);
2149
2150 list_del(p);
2151 kfree(b);
2152 }
2153
2154 return 0;
2155}
2156
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002157int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002158{
2159 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002160
2161 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2162 return -EBADF;
2163
Antti Julku5e762442011-08-25 16:48:02 +03002164 if (hci_blacklist_lookup(hdev, bdaddr))
2165 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002166
2167 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002168 if (!entry)
2169 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002170
2171 bacpy(&entry->bdaddr, bdaddr);
2172
2173 list_add(&entry->list, &hdev->blacklist);
2174
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002175 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002176}
2177
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002178int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002179{
2180 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002181
Szymon Janc1ec918c2011-11-16 09:32:21 +01002182 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002183 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002184
2185 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002186 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002187 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002188
2189 list_del(&entry->list);
2190 kfree(entry);
2191
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002192 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002193}
2194
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002195static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002196{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002197 if (status) {
2198 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002199
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002200 hci_dev_lock(hdev);
2201 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2202 hci_dev_unlock(hdev);
2203 return;
2204 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002205}
2206
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002207static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002208{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002209 /* General inquiry access code (GIAC) */
2210 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2211 struct hci_request req;
2212 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002213 int err;
2214
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002215 if (status) {
2216 BT_ERR("Failed to disable LE scanning: status %d", status);
2217 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002218 }
2219
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002220 switch (hdev->discovery.type) {
2221 case DISCOV_TYPE_LE:
2222 hci_dev_lock(hdev);
2223 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2224 hci_dev_unlock(hdev);
2225 break;
2226
2227 case DISCOV_TYPE_INTERLEAVED:
2228 hci_req_init(&req, hdev);
2229
2230 memset(&cp, 0, sizeof(cp));
2231 memcpy(&cp.lap, lap, sizeof(cp.lap));
2232 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2233 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2234
2235 hci_dev_lock(hdev);
2236
2237 hci_inquiry_cache_flush(hdev);
2238
2239 err = hci_req_run(&req, inquiry_complete);
2240 if (err) {
2241 BT_ERR("Inquiry request failed: err %d", err);
2242 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2243 }
2244
2245 hci_dev_unlock(hdev);
2246 break;
2247 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002248}
2249
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002250static void le_scan_disable_work(struct work_struct *work)
2251{
2252 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002253 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002254 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002255 struct hci_request req;
2256 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002257
2258 BT_DBG("%s", hdev->name);
2259
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002260 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002261
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002262 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002263 cp.enable = LE_SCAN_DISABLE;
2264 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002265
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002266 err = hci_req_run(&req, le_scan_disable_work_complete);
2267 if (err)
2268 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002269}
2270
David Herrmann9be0dab2012-04-22 14:39:57 +02002271/* Alloc HCI device */
2272struct hci_dev *hci_alloc_dev(void)
2273{
2274 struct hci_dev *hdev;
2275
2276 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2277 if (!hdev)
2278 return NULL;
2279
David Herrmannb1b813d2012-04-22 14:39:58 +02002280 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2281 hdev->esco_type = (ESCO_HV1);
2282 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002283 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2284 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002285 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2286 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002287
David Herrmannb1b813d2012-04-22 14:39:58 +02002288 hdev->sniff_max_interval = 800;
2289 hdev->sniff_min_interval = 80;
2290
Marcel Holtmannbef64732013-10-11 08:23:19 -07002291 hdev->le_scan_interval = 0x0060;
2292 hdev->le_scan_window = 0x0030;
2293
David Herrmannb1b813d2012-04-22 14:39:58 +02002294 mutex_init(&hdev->lock);
2295 mutex_init(&hdev->req_lock);
2296
2297 INIT_LIST_HEAD(&hdev->mgmt_pending);
2298 INIT_LIST_HEAD(&hdev->blacklist);
2299 INIT_LIST_HEAD(&hdev->uuids);
2300 INIT_LIST_HEAD(&hdev->link_keys);
2301 INIT_LIST_HEAD(&hdev->long_term_keys);
2302 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002303 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002304
2305 INIT_WORK(&hdev->rx_work, hci_rx_work);
2306 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2307 INIT_WORK(&hdev->tx_work, hci_tx_work);
2308 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002309
David Herrmannb1b813d2012-04-22 14:39:58 +02002310 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2311 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2312 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2313
David Herrmannb1b813d2012-04-22 14:39:58 +02002314 skb_queue_head_init(&hdev->rx_q);
2315 skb_queue_head_init(&hdev->cmd_q);
2316 skb_queue_head_init(&hdev->raw_q);
2317
2318 init_waitqueue_head(&hdev->req_wait_q);
2319
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002320 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002321
David Herrmannb1b813d2012-04-22 14:39:58 +02002322 hci_init_sysfs(hdev);
2323 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002324
2325 return hdev;
2326}
2327EXPORT_SYMBOL(hci_alloc_dev);
2328
2329/* Free HCI device */
2330void hci_free_dev(struct hci_dev *hdev)
2331{
David Herrmann9be0dab2012-04-22 14:39:57 +02002332 /* will free via device release */
2333 put_device(&hdev->dev);
2334}
2335EXPORT_SYMBOL(hci_free_dev);
2336
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337/* Register HCI device */
2338int hci_register_dev(struct hci_dev *hdev)
2339{
David Herrmannb1b813d2012-04-22 14:39:58 +02002340 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341
David Herrmann010666a2012-01-07 15:47:07 +01002342 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 return -EINVAL;
2344
Mat Martineau08add512011-11-02 16:18:36 -07002345 /* Do not allow HCI_AMP devices to register at index 0,
2346 * so the index can be used as the AMP controller ID.
2347 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002348 switch (hdev->dev_type) {
2349 case HCI_BREDR:
2350 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2351 break;
2352 case HCI_AMP:
2353 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2354 break;
2355 default:
2356 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002358
Sasha Levin3df92b32012-05-27 22:36:56 +02002359 if (id < 0)
2360 return id;
2361
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 sprintf(hdev->name, "hci%d", id);
2363 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002364
2365 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2366
Kees Cookd8537542013-07-03 15:04:57 -07002367 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2368 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002369 if (!hdev->workqueue) {
2370 error = -ENOMEM;
2371 goto err;
2372 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002373
Kees Cookd8537542013-07-03 15:04:57 -07002374 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2375 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002376 if (!hdev->req_workqueue) {
2377 destroy_workqueue(hdev->workqueue);
2378 error = -ENOMEM;
2379 goto err;
2380 }
2381
David Herrmann33ca9542011-10-08 14:58:49 +02002382 error = hci_add_sysfs(hdev);
2383 if (error < 0)
2384 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002386 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002387 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2388 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002389 if (hdev->rfkill) {
2390 if (rfkill_register(hdev->rfkill) < 0) {
2391 rfkill_destroy(hdev->rfkill);
2392 hdev->rfkill = NULL;
2393 }
2394 }
2395
Johan Hedberg5e130362013-09-13 08:58:17 +03002396 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2397 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2398
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002399 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002400 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002401
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002402 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002403 /* Assume BR/EDR support until proven otherwise (such as
2404 * through reading supported features during init.
2405 */
2406 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2407 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002408
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002409 write_lock(&hci_dev_list_lock);
2410 list_add(&hdev->list, &hci_dev_list);
2411 write_unlock(&hci_dev_list_lock);
2412
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002414 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415
Johan Hedberg19202572013-01-14 22:33:51 +02002416 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002417
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002419
David Herrmann33ca9542011-10-08 14:58:49 +02002420err_wqueue:
2421 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002422 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002423err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002424 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002425
David Herrmann33ca9542011-10-08 14:58:49 +02002426 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427}
2428EXPORT_SYMBOL(hci_register_dev);
2429
2430/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002431void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432{
Sasha Levin3df92b32012-05-27 22:36:56 +02002433 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002434
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002435 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436
Johan Hovold94324962012-03-15 14:48:41 +01002437 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2438
Sasha Levin3df92b32012-05-27 22:36:56 +02002439 id = hdev->id;
2440
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002441 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002443 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444
2445 hci_dev_do_close(hdev);
2446
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302447 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002448 kfree_skb(hdev->reassembly[i]);
2449
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002450 cancel_work_sync(&hdev->power_on);
2451
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002452 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002453 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002454 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002455 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002456 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002457 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002458
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002459 /* mgmt_index_removed should take care of emptying the
2460 * pending list */
2461 BUG_ON(!list_empty(&hdev->mgmt_pending));
2462
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 hci_notify(hdev, HCI_DEV_UNREG);
2464
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002465 if (hdev->rfkill) {
2466 rfkill_unregister(hdev->rfkill);
2467 rfkill_destroy(hdev->rfkill);
2468 }
2469
David Herrmannce242972011-10-08 14:58:48 +02002470 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002471
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002472 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002473 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002474
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002475 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002476 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002477 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002478 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002479 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002480 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002481 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002482
David Herrmanndc946bd2012-01-07 15:47:24 +01002483 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002484
2485 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486}
2487EXPORT_SYMBOL(hci_unregister_dev);
2488
2489/* Suspend HCI device */
2490int hci_suspend_dev(struct hci_dev *hdev)
2491{
2492 hci_notify(hdev, HCI_DEV_SUSPEND);
2493 return 0;
2494}
2495EXPORT_SYMBOL(hci_suspend_dev);
2496
2497/* Resume HCI device */
2498int hci_resume_dev(struct hci_dev *hdev)
2499{
2500 hci_notify(hdev, HCI_DEV_RESUME);
2501 return 0;
2502}
2503EXPORT_SYMBOL(hci_resume_dev);
2504
Marcel Holtmann76bca882009-11-18 00:40:39 +01002505/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002506int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002507{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002508 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002509 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002510 kfree_skb(skb);
2511 return -ENXIO;
2512 }
2513
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002514 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002515 bt_cb(skb)->incoming = 1;
2516
2517 /* Time stamp */
2518 __net_timestamp(skb);
2519
Marcel Holtmann76bca882009-11-18 00:40:39 +01002520 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002521 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002522
Marcel Holtmann76bca882009-11-18 00:40:39 +01002523 return 0;
2524}
2525EXPORT_SYMBOL(hci_recv_frame);
2526
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302527static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002528 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302529{
2530 int len = 0;
2531 int hlen = 0;
2532 int remain = count;
2533 struct sk_buff *skb;
2534 struct bt_skb_cb *scb;
2535
2536 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002537 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302538 return -EILSEQ;
2539
2540 skb = hdev->reassembly[index];
2541
2542 if (!skb) {
2543 switch (type) {
2544 case HCI_ACLDATA_PKT:
2545 len = HCI_MAX_FRAME_SIZE;
2546 hlen = HCI_ACL_HDR_SIZE;
2547 break;
2548 case HCI_EVENT_PKT:
2549 len = HCI_MAX_EVENT_SIZE;
2550 hlen = HCI_EVENT_HDR_SIZE;
2551 break;
2552 case HCI_SCODATA_PKT:
2553 len = HCI_MAX_SCO_SIZE;
2554 hlen = HCI_SCO_HDR_SIZE;
2555 break;
2556 }
2557
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002558 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302559 if (!skb)
2560 return -ENOMEM;
2561
2562 scb = (void *) skb->cb;
2563 scb->expect = hlen;
2564 scb->pkt_type = type;
2565
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302566 hdev->reassembly[index] = skb;
2567 }
2568
2569 while (count) {
2570 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002571 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302572
2573 memcpy(skb_put(skb, len), data, len);
2574
2575 count -= len;
2576 data += len;
2577 scb->expect -= len;
2578 remain = count;
2579
2580 switch (type) {
2581 case HCI_EVENT_PKT:
2582 if (skb->len == HCI_EVENT_HDR_SIZE) {
2583 struct hci_event_hdr *h = hci_event_hdr(skb);
2584 scb->expect = h->plen;
2585
2586 if (skb_tailroom(skb) < scb->expect) {
2587 kfree_skb(skb);
2588 hdev->reassembly[index] = NULL;
2589 return -ENOMEM;
2590 }
2591 }
2592 break;
2593
2594 case HCI_ACLDATA_PKT:
2595 if (skb->len == HCI_ACL_HDR_SIZE) {
2596 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2597 scb->expect = __le16_to_cpu(h->dlen);
2598
2599 if (skb_tailroom(skb) < scb->expect) {
2600 kfree_skb(skb);
2601 hdev->reassembly[index] = NULL;
2602 return -ENOMEM;
2603 }
2604 }
2605 break;
2606
2607 case HCI_SCODATA_PKT:
2608 if (skb->len == HCI_SCO_HDR_SIZE) {
2609 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2610 scb->expect = h->dlen;
2611
2612 if (skb_tailroom(skb) < scb->expect) {
2613 kfree_skb(skb);
2614 hdev->reassembly[index] = NULL;
2615 return -ENOMEM;
2616 }
2617 }
2618 break;
2619 }
2620
2621 if (scb->expect == 0) {
2622 /* Complete frame */
2623
2624 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002625 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302626
2627 hdev->reassembly[index] = NULL;
2628 return remain;
2629 }
2630 }
2631
2632 return remain;
2633}
2634
Marcel Holtmannef222012007-07-11 06:42:04 +02002635int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2636{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302637 int rem = 0;
2638
Marcel Holtmannef222012007-07-11 06:42:04 +02002639 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2640 return -EILSEQ;
2641
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002642 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002643 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302644 if (rem < 0)
2645 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002646
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302647 data += (count - rem);
2648 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002649 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002650
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302651 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002652}
2653EXPORT_SYMBOL(hci_recv_fragment);
2654
Suraj Sumangala99811512010-07-14 13:02:19 +05302655#define STREAM_REASSEMBLY 0
2656
2657int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2658{
2659 int type;
2660 int rem = 0;
2661
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002662 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302663 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2664
2665 if (!skb) {
2666 struct { char type; } *pkt;
2667
2668 /* Start of the frame */
2669 pkt = data;
2670 type = pkt->type;
2671
2672 data++;
2673 count--;
2674 } else
2675 type = bt_cb(skb)->pkt_type;
2676
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002677 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002678 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302679 if (rem < 0)
2680 return rem;
2681
2682 data += (count - rem);
2683 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002684 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302685
2686 return rem;
2687}
2688EXPORT_SYMBOL(hci_recv_stream_fragment);
2689
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690/* ---- Interface to upper protocols ---- */
2691
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692int hci_register_cb(struct hci_cb *cb)
2693{
2694 BT_DBG("%p name %s", cb, cb->name);
2695
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002696 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002698 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
2700 return 0;
2701}
2702EXPORT_SYMBOL(hci_register_cb);
2703
2704int hci_unregister_cb(struct hci_cb *cb)
2705{
2706 BT_DBG("%p name %s", cb, cb->name);
2707
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002708 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002710 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711
2712 return 0;
2713}
2714EXPORT_SYMBOL(hci_unregister_cb);
2715
Marcel Holtmann51086992013-10-10 14:54:19 -07002716static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002718 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002720 /* Time stamp */
2721 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002723 /* Send copy to monitor */
2724 hci_send_to_monitor(hdev, skb);
2725
2726 if (atomic_read(&hdev->promisc)) {
2727 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002728 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 }
2730
2731 /* Get rid of skb owner, prior to sending to the driver. */
2732 skb_orphan(skb);
2733
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07002734 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07002735 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736}
2737
Johan Hedberg3119ae92013-03-05 20:37:44 +02002738void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2739{
2740 skb_queue_head_init(&req->cmd_q);
2741 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002742 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002743}
2744
2745int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2746{
2747 struct hci_dev *hdev = req->hdev;
2748 struct sk_buff *skb;
2749 unsigned long flags;
2750
2751 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2752
Andre Guedes5d73e032013-03-08 11:20:16 -03002753 /* If an error occured during request building, remove all HCI
2754 * commands queued on the HCI request queue.
2755 */
2756 if (req->err) {
2757 skb_queue_purge(&req->cmd_q);
2758 return req->err;
2759 }
2760
Johan Hedberg3119ae92013-03-05 20:37:44 +02002761 /* Do not allow empty requests */
2762 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002763 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002764
2765 skb = skb_peek_tail(&req->cmd_q);
2766 bt_cb(skb)->req.complete = complete;
2767
2768 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2769 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2770 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2771
2772 queue_work(hdev->workqueue, &hdev->cmd_work);
2773
2774 return 0;
2775}
2776
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002777static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002778 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779{
2780 int len = HCI_COMMAND_HDR_SIZE + plen;
2781 struct hci_command_hdr *hdr;
2782 struct sk_buff *skb;
2783
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002785 if (!skb)
2786 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787
2788 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002789 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790 hdr->plen = plen;
2791
2792 if (plen)
2793 memcpy(skb_put(skb, plen), param, plen);
2794
2795 BT_DBG("skb len %d", skb->len);
2796
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002797 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002798
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002799 return skb;
2800}
2801
2802/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002803int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2804 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002805{
2806 struct sk_buff *skb;
2807
2808 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2809
2810 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2811 if (!skb) {
2812 BT_ERR("%s no memory for command", hdev->name);
2813 return -ENOMEM;
2814 }
2815
Johan Hedberg11714b32013-03-05 20:37:47 +02002816 /* Stand-alone HCI commands must be flaged as
2817 * single-command requests.
2818 */
2819 bt_cb(skb)->req.start = true;
2820
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002822 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823
2824 return 0;
2825}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826
Johan Hedberg71c76a12013-03-05 20:37:46 +02002827/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002828void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2829 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002830{
2831 struct hci_dev *hdev = req->hdev;
2832 struct sk_buff *skb;
2833
2834 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2835
Andre Guedes34739c12013-03-08 11:20:18 -03002836 /* If an error occured during request building, there is no point in
2837 * queueing the HCI command. We can simply return.
2838 */
2839 if (req->err)
2840 return;
2841
Johan Hedberg71c76a12013-03-05 20:37:46 +02002842 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2843 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002844 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2845 hdev->name, opcode);
2846 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002847 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002848 }
2849
2850 if (skb_queue_empty(&req->cmd_q))
2851 bt_cb(skb)->req.start = true;
2852
Johan Hedberg02350a72013-04-03 21:50:29 +03002853 bt_cb(skb)->req.event = event;
2854
Johan Hedberg71c76a12013-03-05 20:37:46 +02002855 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002856}
2857
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002858void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2859 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002860{
2861 hci_req_add_ev(req, opcode, plen, param, 0);
2862}
2863
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002865void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866{
2867 struct hci_command_hdr *hdr;
2868
2869 if (!hdev->sent_cmd)
2870 return NULL;
2871
2872 hdr = (void *) hdev->sent_cmd->data;
2873
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002874 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875 return NULL;
2876
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002877 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878
2879 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2880}
2881
2882/* Send ACL data */
2883static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2884{
2885 struct hci_acl_hdr *hdr;
2886 int len = skb->len;
2887
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002888 skb_push(skb, HCI_ACL_HDR_SIZE);
2889 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002890 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002891 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2892 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893}
2894
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002895static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002896 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002898 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 struct hci_dev *hdev = conn->hdev;
2900 struct sk_buff *list;
2901
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002902 skb->len = skb_headlen(skb);
2903 skb->data_len = 0;
2904
2905 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002906
2907 switch (hdev->dev_type) {
2908 case HCI_BREDR:
2909 hci_add_acl_hdr(skb, conn->handle, flags);
2910 break;
2911 case HCI_AMP:
2912 hci_add_acl_hdr(skb, chan->handle, flags);
2913 break;
2914 default:
2915 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2916 return;
2917 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002918
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002919 list = skb_shinfo(skb)->frag_list;
2920 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 /* Non fragmented */
2922 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2923
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002924 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 } else {
2926 /* Fragmented */
2927 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2928
2929 skb_shinfo(skb)->frag_list = NULL;
2930
2931 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002932 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002934 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002935
2936 flags &= ~ACL_START;
2937 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 do {
2939 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002940
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002941 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002942 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943
2944 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2945
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002946 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947 } while (list);
2948
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002949 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002951}
2952
2953void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2954{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002955 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002956
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002957 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002958
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002959 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002961 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963
2964/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002965void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966{
2967 struct hci_dev *hdev = conn->hdev;
2968 struct hci_sco_hdr hdr;
2969
2970 BT_DBG("%s len %d", hdev->name, skb->len);
2971
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002972 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973 hdr.dlen = skb->len;
2974
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002975 skb_push(skb, HCI_SCO_HDR_SIZE);
2976 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002977 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002979 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002980
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002982 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984
2985/* ---- HCI TX task (outgoing data) ---- */
2986
2987/* HCI Connection scheduler */
/* Select the connection of the given link @type that has data queued and
 * the fewest packets in flight (least-sent first, giving a simple fairness
 * policy), and compute its TX quota.
 *
 * Returns the chosen connection, or NULL if no eligible connection exists.
 * On success *quote is set to the per-round packet budget (always >= 1);
 * on failure it is set to 0.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only consider links of the requested type with pending data */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Skip connections that are not (yet) usable for data */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the candidate with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen - no point walking on */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the controller buffer pool matching the link type.
		 * LE links fall back to the ACL pool when the controller
		 * has no dedicated LE buffers (le_mtu == 0).
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the free buffers evenly; guarantee at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3047
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003048static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049{
3050 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003051 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052
Ville Tervobae1f5d92011-02-10 22:38:53 -03003053 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003055 rcu_read_lock();
3056
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003058 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003059 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003060 BT_ERR("%s killing stalled connection %pMR",
3061 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003062 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 }
3064 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003065
3066 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067}
3068
/* Select the best HCI channel of the given link @type to transmit from.
 *
 * Channels are compared by the priority of their head-of-queue skb: only
 * channels at the current highest observed priority compete, and among
 * those the one whose parent connection has the fewest packets in flight
 * wins. Returns the chosen channel or NULL; *quote receives the packet
 * budget (>= 1) when a channel is returned and is left untouched otherwise.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Ignore channels below the best priority seen so far */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the least-sent race */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-sent parent connection wins the tie */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once every connection of this type has been seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool of the winning channel's link type; LE falls
	 * back to the ACL pool when no dedicated LE buffers exist.
	 */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the free buffers, but never less than one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3150
/* Anti-starvation pass run after a TX round: for every channel of the
 * given link type that sent nothing this round (chan->sent == 0) and still
 * has data queued, promote its head skb to HCI_PRIO_MAX - 1 so it can
 * compete next round. Channels that did send have their per-round counter
 * reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: just reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the promotion ceiling - nothing to do */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop once all connections of this type were visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3200
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003201static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3202{
3203 /* Calculate count of blocks used by this packet */
3204 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3205}
3206
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003207static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 if (!test_bit(HCI_RAW, &hdev->flags)) {
3210 /* ACL tx timeout must be longer than maximum
3211 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003212 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003213 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003214 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003216}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217
/* ACL scheduler for packet-based flow control: repeatedly pick the best
 * channel and send up to its quota of packets, stopping within a channel
 * when a lower-priority skb is reached. Runs the anti-starvation pass if
 * anything was transmitted.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled controller before trying to send more */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed; count it against
			 * both the channel and its parent connection.
			 */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: promote starved channels for the next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3255
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003256static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003257{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003258 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003259 struct hci_chan *chan;
3260 struct sk_buff *skb;
3261 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003262 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003263
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003264 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003265
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003266 BT_DBG("%s", hdev->name);
3267
3268 if (hdev->dev_type == HCI_AMP)
3269 type = AMP_LINK;
3270 else
3271 type = ACL_LINK;
3272
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003273 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003274 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003275 u32 priority = (skb_peek(&chan->data_q))->priority;
3276 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3277 int blocks;
3278
3279 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003280 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003281
3282 /* Stop if priority has changed */
3283 if (skb->priority < priority)
3284 break;
3285
3286 skb = skb_dequeue(&chan->data_q);
3287
3288 blocks = __get_blocks(hdev, skb);
3289 if (blocks > hdev->block_cnt)
3290 return;
3291
3292 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003293 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003294
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003295 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003296 hdev->acl_last_tx = jiffies;
3297
3298 hdev->block_cnt -= blocks;
3299 quote -= blocks;
3300
3301 chan->sent += blocks;
3302 chan->conn->sent += blocks;
3303 }
3304 }
3305
3306 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003307 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003308}
3309
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003310static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003311{
3312 BT_DBG("%s", hdev->name);
3313
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003314 /* No ACL link over BR/EDR controller */
3315 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3316 return;
3317
3318 /* No AMP link over AMP controller */
3319 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003320 return;
3321
3322 switch (hdev->flow_ctl_mode) {
3323 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3324 hci_sched_acl_pkt(hdev);
3325 break;
3326
3327 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3328 hci_sched_acl_blk(hdev);
3329 break;
3330 }
3331}
3332
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003334static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335{
3336 struct hci_conn *conn;
3337 struct sk_buff *skb;
3338 int quote;
3339
3340 BT_DBG("%s", hdev->name);
3341
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003342 if (!hci_conn_num(hdev, SCO_LINK))
3343 return;
3344
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3346 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3347 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003348 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349
3350 conn->sent++;
3351 if (conn->sent == ~0)
3352 conn->sent = 0;
3353 }
3354 }
3355}
3356
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003357static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003358{
3359 struct hci_conn *conn;
3360 struct sk_buff *skb;
3361 int quote;
3362
3363 BT_DBG("%s", hdev->name);
3364
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003365 if (!hci_conn_num(hdev, ESCO_LINK))
3366 return;
3367
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003368 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3369 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003370 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3371 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003372 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003373
3374 conn->sent++;
3375 if (conn->sent == ~0)
3376 conn->sent = 0;
3377 }
3378 }
3379}
3380
/* LE scheduler: same channel-based policy as the ACL packet scheduler.
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) borrow
 * the ACL pool, so the consumed count is written back to whichever pool
 * was used.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE pool when present, otherwise share the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to the pool that was consumed */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: promote starved channels for the next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3431
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003432static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003434 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435 struct sk_buff *skb;
3436
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003437 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003438 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439
Marcel Holtmann52de5992013-09-03 18:08:38 -07003440 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3441 /* Schedule queues and send stuff to HCI driver */
3442 hci_sched_acl(hdev);
3443 hci_sched_sco(hdev);
3444 hci_sched_esco(hdev);
3445 hci_sched_le(hdev);
3446 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003447
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448 /* Send next queued raw (unknown type) packet */
3449 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003450 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451}
3452
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003453/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454
3455/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003456static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457{
3458 struct hci_acl_hdr *hdr = (void *) skb->data;
3459 struct hci_conn *conn;
3460 __u16 handle, flags;
3461
3462 skb_pull(skb, HCI_ACL_HDR_SIZE);
3463
3464 handle = __le16_to_cpu(hdr->handle);
3465 flags = hci_flags(handle);
3466 handle = hci_handle(handle);
3467
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003468 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003469 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470
3471 hdev->stat.acl_rx++;
3472
3473 hci_dev_lock(hdev);
3474 conn = hci_conn_hash_lookup_handle(hdev, handle);
3475 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003476
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003478 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003479
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003481 l2cap_recv_acldata(conn, skb, flags);
3482 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003484 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003485 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486 }
3487
3488 kfree_skb(skb);
3489}
3490
3491/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003492static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493{
3494 struct hci_sco_hdr *hdr = (void *) skb->data;
3495 struct hci_conn *conn;
3496 __u16 handle;
3497
3498 skb_pull(skb, HCI_SCO_HDR_SIZE);
3499
3500 handle = __le16_to_cpu(hdr->handle);
3501
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003502 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503
3504 hdev->stat.sco_rx++;
3505
3506 hci_dev_lock(hdev);
3507 conn = hci_conn_hash_lookup_handle(hdev, handle);
3508 hci_dev_unlock(hdev);
3509
3510 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003512 sco_recv_scodata(conn, skb);
3513 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003515 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003516 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517 }
3518
3519 kfree_skb(skb);
3520}
3521
Johan Hedberg9238f362013-03-05 20:37:48 +02003522static bool hci_req_is_complete(struct hci_dev *hdev)
3523{
3524 struct sk_buff *skb;
3525
3526 skb = skb_peek(&hdev->cmd_q);
3527 if (!skb)
3528 return true;
3529
3530 return bt_cb(skb)->req.start;
3531}
3532
Johan Hedberg42c6b122013-03-05 20:37:49 +02003533static void hci_resend_last(struct hci_dev *hdev)
3534{
3535 struct hci_command_hdr *sent;
3536 struct sk_buff *skb;
3537 u16 opcode;
3538
3539 if (!hdev->sent_cmd)
3540 return;
3541
3542 sent = (void *) hdev->sent_cmd->data;
3543 opcode = __le16_to_cpu(sent->opcode);
3544 if (opcode == HCI_OP_RESET)
3545 return;
3546
3547 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3548 if (!skb)
3549 return;
3550
3551 skb_queue_head(&hdev->cmd_q, skb);
3552 queue_work(hdev->workqueue, &hdev->cmd_work);
3553}
3554
/* Handle completion of the command identified by @opcode with @status and,
 * when this ends an HCI request, locate and invoke the request's complete
 * callback exactly once. On failure, all remaining queued commands of the
 * same request are discarded.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request; stop at
	 * the first command that starts the next request and put it back.
	 * The queue lock is held with IRQs off since the queue is also
	 * touched from interrupt context.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3620
/* RX work handler: drain the receive queue, mirroring each packet to the
 * monitor (and to sockets in promiscuous mode), then dispatch it to the
 * event/ACL/SCO handler according to its packet type. Ownership of each
 * skb passes to the called handler or is dropped here.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw or user-channel devices get no stack processing */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
3676
/* Command work handler: when the controller has command credit, dequeue the
 * next queued HCI command, keep a clone in hdev->sent_cmd for completion
 * matching, transmit it, and (re)arm the command timeout. If the clone
 * cannot be allocated the command is put back and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset no timeout is armed; otherwise
			 * (re)start the command watchdog.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}