/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

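/* Illustrative sketch (hypothetical, not part of the upstream file): a
 * minimal caller of __hci_cmd_sync() above, assuming the standard Read
 * BD_ADDR opcode and reply struct from hci.h. Such calls typically happen
 * from a driver's setup() callback, where the core already holds the
 * request lock. Guarded by #if 0 because it is an example only.
 */
#if 0
static int example_read_bd_addr(struct hci_dev *hdev)
{
	struct hci_rp_read_bd_addr *rp;
	struct sk_buff *skb;

	/* Blocks until the matching Command Complete event arrives or
	 * the timeout fires; the skb holds the return parameters.
	 */
	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	rp = (void *) skb->data;
	BT_DBG("%s bdaddr %pMR", hdev->name, &rp->bdaddr);

	kfree_skb(skb);
	return 0;
}
#endif
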
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

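/* Illustrative sketch (hypothetical, not part of the upstream file): the
 * builder/runner split used throughout this file. A request callback only
 * queues commands with hci_req_add(); hci_req_sync() then runs the request
 * and sleeps in __hci_req_sync() until hci_req_sync_complete() reports the
 * result. SCAN_PAGE and HCI_CMD_TIMEOUT are the standard constants from
 * the HCI headers; the device must already be HCI_UP.
 */
#if 0
static void example_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	/* Queue only; nothing is sent until hci_req_run() executes */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int example_enable_page_scan(struct hci_dev *hdev)
{
	return hci_req_sync(hdev, example_scan_req, SCAN_PAGE,
			    HCI_CMD_TIMEOUT);
}
#endif
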
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send the command only if it
	 * is marked as supported. If not supported, assume that the
	 * controller does not have actual support for stored link keys,
	 * which makes this command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

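/* Illustrative sketch (hypothetical, not part of the upstream file): how
 * user space reaches hci_inquiry() above via the HCIINQUIRY ioctl on a raw
 * HCI socket, compiled as a normal program against the BlueZ user-space
 * headers. The buffer layout (request header followed by up to 255
 * inquiry_info records) mirrors the max_rsp handling above.
 */
#if 0
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_inquiry(int dev_id)
{
	uint8_t buf[sizeof(struct hci_inquiry_req) +
		    255 * sizeof(inquiry_info)];
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
	int dd, num_rsp = -1;

	dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (dd < 0)
		return -1;

	ir->dev_id  = dev_id;
	ir->flags   = IREQ_CACHE_FLUSH;	/* force a fresh inquiry */
	ir->lap[0]  = 0x33;		/* GIAC 0x9e8b33 */
	ir->lap[1]  = 0x8b;
	ir->lap[2]  = 0x9e;
	ir->length  = 8;		/* inquiry length, units of 1.28s */
	ir->num_rsp = 0;		/* 0 means "up to 255", see above */

	if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) >= 0)
		num_rsp = ir->num_rsp;

	/* On success, inquiry_info records follow the header in buf */
	close(dd);
	return num_rsp;
}
#endif
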
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

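/* Illustrative sketch (hypothetical, not part of the upstream file):
 * hci_dev_open() above services the HCIDEVUP ioctl that user-space tools
 * such as "hciconfig hci0 up" issue on a raw HCI control socket.
 */
#if 0
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_bring_up(int dev_id)
{
	int err = 0;
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0)
		return -1;

	/* Resolves the device by index and ends up in hci_dev_do_open() */
	if (ioctl(ctl, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
		err = -1;

	close(ctl);
	return err;
}
#endif
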
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

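/* Handler for the device-specific HCI ioctls (HCISETAUTH, HCISETSCAN,
 * etc.). Settings that need a controller round-trip are run
 * synchronously via hci_req_sync(); pure host-side settings such as
 * HCISETPTYPE are applied directly.
 */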
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

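/* Deferred power-on handler, queued on hdev->req_workqueue by
 * hci_register_dev() and by the management interface. Error conditions
 * such as rfkill or a missing public/static address are deliberately
 * ignored during setup, so they are re-checked here and the device is
 * turned back off if they still hold.
 */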
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

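/* Decide whether a link key should be stored persistently or only kept
 * for the lifetime of the connection. The checks below mirror the
 * bonding requirements that both sides exchanged during pairing.
 */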
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

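/* LE scan disable flow: le_scan_disable_work() below sends the command
 * that stops an LE scan. Once that completes, LE-only discovery is
 * simply marked as stopped, while interleaved discovery continues with
 * a BR/EDR inquiry whose own completion is handled by
 * inquiry_complete().
 */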
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

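/* A minimal driver-side sketch of the registration flow (hypothetical
 * my_open/my_close/my_send callbacks and priv pointer; error handling
 * omitted):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, priv);
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */
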
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
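
/* A sketch of how a driver's RX path might hand a completed frame to
 * the core (assumes skb was allocated with bt_skb_alloc() and filled by
 * the transport):
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 *
 * The skb is always consumed: queued on success, freed on error.
 */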

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
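
/* hci_recv_fragment() suits transports that deliver data in arbitrary
 * chunks but with a known packet type, e.g. (sketch, with buf/len
 * coming from the transport):
 *
 *	hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *
 * Reassembly state is kept per packet type in hdev->reassembly[];
 * hci_recv_stream_fragment() below instead parses the type byte out of
 * a raw byte stream and shares a single reassembly slot.
 */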

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

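/* Asynchronous HCI request API: commands are first queued on a local
 * hci_request and then spliced onto hdev->cmd_q in one go, so the
 * completion callback fires only after the last command in the batch.
 * A sketch of typical use (hypothetical set_scan_complete callback):
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *	hci_req_run(&req, set_scan_complete);
 */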
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

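/* A minimal usage sketch of the request API above. The callback name is
 * hypothetical; see the real callers of hci_req_run() elsewhere in the
 * stack for complete examples:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 */
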
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

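/* Add ACL headers and queue an (optionally fragmented) ACL skb on the
 * given channel queue. For a fragmented skb the frag_list is unwound
 * and every fragment gets its own header, with ACL_START replaced by
 * ACL_CONT on the continuation fragments.
 */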
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

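/* Entry point for ACL transmission: queue the skb on the channel and
 * kick the TX work, which does the actual scheduling.
 */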
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
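/* Pick the connection of the given link type that has queued data and
 * the lowest number of outstanding packets, and compute its fair share
 * (quote) of the controller's free buffer count.
 */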
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

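/* TX timeout handler: the controller stopped acknowledging packets on
 * this link type, so tear down every connection that still has
 * unacknowledged data.
 */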
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

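/* Channel-level counterpart of hci_low_sent(): among all channels of
 * the given link type, consider only those whose head-of-queue skb has
 * the highest priority, and of those pick the one on the connection
 * with the fewest outstanding packets.
 */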
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

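/* Anti-starvation: channels that got no transmission slot in the last
 * round (chan->sent == 0) have their head-of-queue skb promoted to
 * HCI_PRIO_MAX - 1 so they win the next priority comparison.
 */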
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

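/* With no free controller buffers left, check whether the ACL link has
 * been waiting for acknowledgements longer than HCI_ACL_TX_TIMEOUT and
 * if so declare a link TX timeout.
 */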
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

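/* Packet-based ACL scheduler: drain the highest-priority channels in
 * round-robin fashion, sending at most 'quote' packets per channel
 * while controller buffer credits (acl_cnt) remain.
 */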
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

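/* Dispatch to the packet-based or block-based ACL scheduler depending
 * on the flow control mode advertised by the controller.
 */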
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

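/* LE scheduler: works like the packet-based ACL scheduler, but uses the
 * dedicated LE buffer pool when the controller has one (le_pkts != 0)
 * and falls back to the shared ACL pool otherwise.
 */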
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

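/* Called from the event processing path when a command completes.
 * Decides whether the request the command belongs to is finished and,
 * if so, runs the request's completion callback and drops any commands
 * of a failed request that are still queued.
 */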
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

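/* RX work: drain hdev->rx_q and dispatch each packet to the event, ACL
 * or SCO handler. Copies go to the monitor and, in promiscuous mode, to
 * the raw sockets first.
 */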
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

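/* Command work: if the controller has a free command credit (cmd_cnt),
 * send the next queued command, keeping a clone in hdev->sent_cmd for
 * hci_sent_cmd_data() and arming the command timeout.
 */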
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}