/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
/* Deferred work handlers for the receive, command and transmit paths;
 * defined later in this file.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
50
/* ---- HCI notifications ---- */

/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
Johan Hedberg42c6b122013-03-05 20:37:49 +020060static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070061{
Johan Hedberg42c6b122013-03-05 20:37:49 +020062 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
Fengguang Wu77a63e02013-04-20 16:24:31 +030082static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +030084{
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
87 struct sk_buff *skb;
88
89 hci_dev_lock(hdev);
90
91 skb = hdev->recv_evt;
92 hdev->recv_evt = NULL;
93
94 hci_dev_unlock(hdev);
95
96 if (!skb)
97 return ERR_PTR(-ENODATA);
98
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
101 goto failed;
102 }
103
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
106
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300107 if (event) {
108 if (hdr->evt != event)
109 goto failed;
110 return skb;
111 }
112
Johan Hedberg75e84b72013-04-02 13:35:04 +0300113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 goto failed;
116 }
117
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
120 goto failed;
121 }
122
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
125
126 if (opcode == __le16_to_cpu(ev->opcode))
127 return skb;
128
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
131
132failed:
133 kfree_skb(skb);
134 return ERR_PTR(-ENODATA);
135}
136
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300137struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300138 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300139{
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
142 int err = 0;
143
144 BT_DBG("%s", hdev->name);
145
146 hci_req_init(&req, hdev);
147
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300148 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300149
150 hdev->req_status = HCI_REQ_PEND;
151
152 err = hci_req_run(&req, hci_req_sync_complete);
153 if (err < 0)
154 return ERR_PTR(err);
155
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
158
159 schedule_timeout(timeout);
160
161 remove_wait_queue(&hdev->req_wait_q, &wait);
162
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
165
166 switch (hdev->req_status) {
167 case HCI_REQ_DONE:
168 err = -bt_to_errno(hdev->req_result);
169 break;
170
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
173 break;
174
175 default:
176 err = -ETIMEDOUT;
177 break;
178 }
179
180 hdev->req_status = hdev->req_result = 0;
181
182 BT_DBG("%s end: err %d", hdev->name, err);
183
184 if (err < 0)
185 return ERR_PTR(err);
186
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300187 return hci_get_cmd_complete(hdev, opcode, event);
188}
189EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
191struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300192 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300193{
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300195}
196EXPORT_SYMBOL(__hci_cmd_sync);
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200199static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200200 void (*func)(struct hci_request *req,
201 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200202 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200204 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205 DECLARE_WAITQUEUE(wait, current);
206 int err = 0;
207
208 BT_DBG("%s start", hdev->name);
209
Johan Hedberg42c6b122013-03-05 20:37:49 +0200210 hci_req_init(&req, hdev);
211
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 hdev->req_status = HCI_REQ_PEND;
213
Johan Hedberg42c6b122013-03-05 20:37:49 +0200214 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200215
Johan Hedberg42c6b122013-03-05 20:37:49 +0200216 err = hci_req_run(&req, hci_req_sync_complete);
217 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200218 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300219
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200224 */
Andre Guedes920c8302013-03-08 11:20:15 -0300225 if (err == -ENODATA)
226 return 0;
227
228 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200229 }
230
Andre Guedesbc4445c2013-03-08 11:20:13 -0300231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
233
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234 schedule_timeout(timeout);
235
236 remove_wait_queue(&hdev->req_wait_q, &wait);
237
238 if (signal_pending(current))
239 return -EINTR;
240
241 switch (hdev->req_status) {
242 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700243 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 break;
249
250 default:
251 err = -ETIMEDOUT;
252 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700253 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254
Johan Hedberga5040ef2011-01-10 13:28:59 +0200255 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256
257 BT_DBG("%s end: err %d", hdev->name, err);
258
259 return err;
260}
261
Johan Hedberg01178cd2013-03-05 20:37:41 +0200262static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200263 void (*req)(struct hci_request *req,
264 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200265 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266{
267 int ret;
268
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
271
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 /* Serialize all requests */
273 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200274 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 hci_req_unlock(hdev);
276
277 return ret;
278}
279
Johan Hedberg42c6b122013-03-05 20:37:49 +0200280static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200282 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283
284 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287}
288
Johan Hedberg42c6b122013-03-05 20:37:49 +0200289static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200292
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200296 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200298
299 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301}
302
Johan Hedberg42c6b122013-03-05 20:37:49 +0200303static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200304{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200306
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200307 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300309
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700310 /* Read Local Supported Commands */
311 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
312
313 /* Read Local Supported Features */
314 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
315
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300316 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200317 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300318
319 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200320 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700321
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700322 /* Read Flow Control Mode */
323 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
324
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700325 /* Read Location Data */
326 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200327}
328
Johan Hedberg42c6b122013-03-05 20:37:49 +0200329static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200330{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200331 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200332
333 BT_DBG("%s %ld", hdev->name, opt);
334
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300335 /* Reset */
336 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200337 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300338
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200339 switch (hdev->dev_type) {
340 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200341 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200342 break;
343
344 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200345 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200346 break;
347
348 default:
349 BT_ERR("Unknown device type %d", hdev->dev_type);
350 break;
351 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200352}
353
Johan Hedberg42c6b122013-03-05 20:37:49 +0200354static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200355{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -0700356 struct hci_dev *hdev = req->hdev;
357
Johan Hedberg2177bab2013-03-05 20:37:43 +0200358 __le16 param;
359 __u8 flt_type;
360
361 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200362 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200363
364 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200365 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200366
367 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200368 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200369
370 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200371 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200372
373 /* Clear Event Filters */
374 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200375 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200376
377 /* Connection accept timeout ~20 secs */
378 param = __constant_cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200379 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200380
Marcel Holtmann4ca048e2013-10-11 16:42:07 -0700381 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
382 * but it does not support page scan related HCI commands.
383 */
384 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -0500385 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
386 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
387 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200388}
389
Johan Hedberg42c6b122013-03-05 20:37:49 +0200390static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200391{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300392 struct hci_dev *hdev = req->hdev;
393
Johan Hedberg2177bab2013-03-05 20:37:43 +0200394 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200395 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200396
397 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200398 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200399
400 /* Read LE Advertising Channel TX Power */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200401 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200402
403 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200404 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200405
406 /* Read LE Supported States */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200407 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +0300408
409 /* LE-only controllers have LE implicitly enabled */
410 if (!lmp_bredr_capable(hdev))
411 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200412}
413
414static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
415{
416 if (lmp_ext_inq_capable(hdev))
417 return 0x02;
418
419 if (lmp_inq_rssi_capable(hdev))
420 return 0x01;
421
422 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
423 hdev->lmp_subver == 0x0757)
424 return 0x01;
425
426 if (hdev->manufacturer == 15) {
427 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
428 return 0x01;
429 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
430 return 0x01;
431 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
432 return 0x01;
433 }
434
435 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
436 hdev->lmp_subver == 0x1805)
437 return 0x01;
438
439 return 0x00;
440}
441
Johan Hedberg42c6b122013-03-05 20:37:49 +0200442static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200443{
444 u8 mode;
445
Johan Hedberg42c6b122013-03-05 20:37:49 +0200446 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200447
Johan Hedberg42c6b122013-03-05 20:37:49 +0200448 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200449}
450
Johan Hedberg42c6b122013-03-05 20:37:49 +0200451static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200452{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200453 struct hci_dev *hdev = req->hdev;
454
Johan Hedberg2177bab2013-03-05 20:37:43 +0200455 /* The second byte is 0xff instead of 0x9f (two reserved bits
456 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
457 * command otherwise.
458 */
459 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
460
461 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
462 * any event mask for pre 1.2 devices.
463 */
464 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
465 return;
466
467 if (lmp_bredr_capable(hdev)) {
468 events[4] |= 0x01; /* Flow Specification Complete */
469 events[4] |= 0x02; /* Inquiry Result with RSSI */
470 events[4] |= 0x04; /* Read Remote Extended Features Complete */
471 events[5] |= 0x08; /* Synchronous Connection Complete */
472 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700473 } else {
474 /* Use a different default for LE-only devices */
475 memset(events, 0, sizeof(events));
476 events[0] |= 0x10; /* Disconnection Complete */
477 events[0] |= 0x80; /* Encryption Change */
478 events[1] |= 0x08; /* Read Remote Version Information Complete */
479 events[1] |= 0x20; /* Command Complete */
480 events[1] |= 0x40; /* Command Status */
481 events[1] |= 0x80; /* Hardware Error */
482 events[2] |= 0x04; /* Number of Completed Packets */
483 events[3] |= 0x02; /* Data Buffer Overflow */
484 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +0200485 }
486
487 if (lmp_inq_rssi_capable(hdev))
488 events[4] |= 0x02; /* Inquiry Result with RSSI */
489
490 if (lmp_sniffsubr_capable(hdev))
491 events[5] |= 0x20; /* Sniff Subrating */
492
493 if (lmp_pause_enc_capable(hdev))
494 events[5] |= 0x80; /* Encryption Key Refresh Complete */
495
496 if (lmp_ext_inq_capable(hdev))
497 events[5] |= 0x40; /* Extended Inquiry Result */
498
499 if (lmp_no_flush_capable(hdev))
500 events[7] |= 0x01; /* Enhanced Flush Complete */
501
502 if (lmp_lsto_capable(hdev))
503 events[6] |= 0x80; /* Link Supervision Timeout Changed */
504
505 if (lmp_ssp_capable(hdev)) {
506 events[6] |= 0x01; /* IO Capability Request */
507 events[6] |= 0x02; /* IO Capability Response */
508 events[6] |= 0x04; /* User Confirmation Request */
509 events[6] |= 0x08; /* User Passkey Request */
510 events[6] |= 0x10; /* Remote OOB Data Request */
511 events[6] |= 0x20; /* Simple Pairing Complete */
512 events[7] |= 0x04; /* User Passkey Notification */
513 events[7] |= 0x08; /* Keypress Notification */
514 events[7] |= 0x10; /* Remote Host Supported
515 * Features Notification
516 */
517 }
518
519 if (lmp_le_capable(hdev))
520 events[7] |= 0x20; /* LE Meta-Event */
521
Johan Hedberg42c6b122013-03-05 20:37:49 +0200522 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200523
524 if (lmp_le_capable(hdev)) {
525 memset(events, 0, sizeof(events));
526 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200527 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
528 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200529 }
530}
531
Johan Hedberg42c6b122013-03-05 20:37:49 +0200532static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200533{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200534 struct hci_dev *hdev = req->hdev;
535
Johan Hedberg2177bab2013-03-05 20:37:43 +0200536 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200537 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +0300538 else
539 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200540
541 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200542 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200543
Johan Hedberg42c6b122013-03-05 20:37:49 +0200544 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200545
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300546 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
547 * local supported commands HCI command.
548 */
549 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200550 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200551
552 if (lmp_ssp_capable(hdev)) {
553 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
554 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200555 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
556 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200557 } else {
558 struct hci_cp_write_eir cp;
559
560 memset(hdev->eir, 0, sizeof(hdev->eir));
561 memset(&cp, 0, sizeof(cp));
562
Johan Hedberg42c6b122013-03-05 20:37:49 +0200563 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200564 }
565 }
566
567 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200568 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200569
570 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200571 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200572
573 if (lmp_ext_feat_capable(hdev)) {
574 struct hci_cp_read_local_ext_features cp;
575
576 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200577 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
578 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200579 }
580
581 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
582 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200583 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
584 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200585 }
586}
587
Johan Hedberg42c6b122013-03-05 20:37:49 +0200588static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200589{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200590 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200591 struct hci_cp_write_def_link_policy cp;
592 u16 link_policy = 0;
593
594 if (lmp_rswitch_capable(hdev))
595 link_policy |= HCI_LP_RSWITCH;
596 if (lmp_hold_capable(hdev))
597 link_policy |= HCI_LP_HOLD;
598 if (lmp_sniff_capable(hdev))
599 link_policy |= HCI_LP_SNIFF;
600 if (lmp_park_capable(hdev))
601 link_policy |= HCI_LP_PARK;
602
603 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200604 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200605}
606
Johan Hedberg42c6b122013-03-05 20:37:49 +0200607static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200608{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200609 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200610 struct hci_cp_write_le_host_supported cp;
611
Johan Hedbergc73eee92013-04-19 18:35:21 +0300612 /* LE-only devices do not support explicit enablement */
613 if (!lmp_bredr_capable(hdev))
614 return;
615
Johan Hedberg2177bab2013-03-05 20:37:43 +0200616 memset(&cp, 0, sizeof(cp));
617
618 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
619 cp.le = 0x01;
620 cp.simul = lmp_le_br_capable(hdev);
621 }
622
623 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200624 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
625 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200626}
627
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300628static void hci_set_event_mask_page_2(struct hci_request *req)
629{
630 struct hci_dev *hdev = req->hdev;
631 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
632
633 /* If Connectionless Slave Broadcast master role is supported
634 * enable all necessary events for it.
635 */
636 if (hdev->features[2][0] & 0x01) {
637 events[1] |= 0x40; /* Triggered Clock Capture */
638 events[1] |= 0x80; /* Synchronization Train Complete */
639 events[2] |= 0x10; /* Slave Page Response Timeout */
640 events[2] |= 0x20; /* CSB Channel Map Change */
641 }
642
643 /* If Connectionless Slave Broadcast slave role is supported
644 * enable all necessary events for it.
645 */
646 if (hdev->features[2][0] & 0x02) {
647 events[2] |= 0x01; /* Synchronization Train Received */
648 events[2] |= 0x02; /* CSB Receive */
649 events[2] |= 0x04; /* CSB Timeout */
650 events[2] |= 0x08; /* Truncated Page Complete */
651 }
652
653 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
654}
655
Johan Hedberg42c6b122013-03-05 20:37:49 +0200656static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200657{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200658 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300659 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200660
Gustavo Padovanb8f4e062013-06-13 12:34:31 +0100661 /* Some Broadcom based Bluetooth controllers do not support the
662 * Delete Stored Link Key command. They are clearly indicating its
663 * absence in the bit mask of supported commands.
664 *
665 * Check the supported commands and only if the the command is marked
666 * as supported send it. If not supported assume that the controller
667 * does not have actual support for stored link keys which makes this
668 * command redundant anyway.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -0700669 */
Johan Hedberg59f45d52013-06-13 11:01:13 +0300670 if (hdev->commands[6] & 0x80) {
671 struct hci_cp_delete_stored_link_key cp;
672
673 bacpy(&cp.bdaddr, BDADDR_ANY);
674 cp.delete_all = 0x01;
675 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
676 sizeof(cp), &cp);
677 }
678
Johan Hedberg2177bab2013-03-05 20:37:43 +0200679 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200680 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200681
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500682 if (lmp_le_capable(hdev)) {
Johan Hedberg42c6b122013-03-05 20:37:49 +0200683 hci_set_le_support(req);
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500684 hci_update_ad(req);
685 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300686
687 /* Read features beyond page 1 if available */
688 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
689 struct hci_cp_read_local_ext_features cp;
690
691 cp.page = p;
692 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
693 sizeof(cp), &cp);
694 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200695}
696
Johan Hedberg5d4e7e82013-09-13 11:40:01 +0300697static void hci_init4_req(struct hci_request *req, unsigned long opt)
698{
699 struct hci_dev *hdev = req->hdev;
700
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300701 /* Set event mask page 2 if the HCI command for it is supported */
702 if (hdev->commands[22] & 0x04)
703 hci_set_event_mask_page_2(req);
704
Johan Hedberg5d4e7e82013-09-13 11:40:01 +0300705 /* Check for Synchronization Train support */
706 if (hdev->features[2][0] & 0x04)
707 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
708}
709
Johan Hedberg2177bab2013-03-05 20:37:43 +0200710static int __hci_init(struct hci_dev *hdev)
711{
712 int err;
713
714 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
715 if (err < 0)
716 return err;
717
718 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
719 * BR/EDR/LE type controllers. AMP controllers only need the
720 * first stage init.
721 */
722 if (hdev->dev_type != HCI_BREDR)
723 return 0;
724
725 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
726 if (err < 0)
727 return err;
728
Johan Hedberg5d4e7e82013-09-13 11:40:01 +0300729 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
730 if (err < 0)
731 return err;
732
733 return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200734}
735
Johan Hedberg42c6b122013-03-05 20:37:49 +0200736static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700737{
738 __u8 scan = opt;
739
Johan Hedberg42c6b122013-03-05 20:37:49 +0200740 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741
742 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200743 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744}
745
Johan Hedberg42c6b122013-03-05 20:37:49 +0200746static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747{
748 __u8 auth = opt;
749
Johan Hedberg42c6b122013-03-05 20:37:49 +0200750 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751
752 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200753 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754}
755
Johan Hedberg42c6b122013-03-05 20:37:49 +0200756static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757{
758 __u8 encrypt = opt;
759
Johan Hedberg42c6b122013-03-05 20:37:49 +0200760 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200762 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200763 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764}
765
Johan Hedberg42c6b122013-03-05 20:37:49 +0200766static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200767{
768 __le16 policy = cpu_to_le16(opt);
769
Johan Hedberg42c6b122013-03-05 20:37:49 +0200770 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200771
772 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200773 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200774}
775
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900776/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 * Device is held on return. */
778struct hci_dev *hci_dev_get(int index)
779{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200780 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781
782 BT_DBG("%d", index);
783
784 if (index < 0)
785 return NULL;
786
787 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200788 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789 if (d->id == index) {
790 hdev = hci_dev_hold(d);
791 break;
792 }
793 }
794 read_unlock(&hci_dev_list_lock);
795 return hdev;
796}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797
798/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200799
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200800bool hci_discovery_active(struct hci_dev *hdev)
801{
802 struct discovery_state *discov = &hdev->discovery;
803
Andre Guedes6fbe1952012-02-03 17:47:58 -0300804 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300805 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300806 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200807 return true;
808
Andre Guedes6fbe1952012-02-03 17:47:58 -0300809 default:
810 return false;
811 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200812}
813
Johan Hedbergff9ef572012-01-04 14:23:45 +0200814void hci_discovery_set_state(struct hci_dev *hdev, int state)
815{
816 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
817
818 if (hdev->discovery.state == state)
819 return;
820
821 switch (state) {
822 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300823 if (hdev->discovery.state != DISCOVERY_STARTING)
824 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200825 break;
826 case DISCOVERY_STARTING:
827 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300828 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200829 mgmt_discovering(hdev, 1);
830 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200831 case DISCOVERY_RESOLVING:
832 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200833 case DISCOVERY_STOPPING:
834 break;
835 }
836
837 hdev->discovery.state = state;
838}
839
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300840void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841{
Johan Hedberg30883512012-01-04 14:16:21 +0200842 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200843 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700844
Johan Hedberg561aafb2012-01-04 13:31:59 +0200845 list_for_each_entry_safe(p, n, &cache->all, all) {
846 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200847 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700848 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200849
850 INIT_LIST_HEAD(&cache->unknown);
851 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700852}
853
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300854struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
855 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856{
Johan Hedberg30883512012-01-04 14:16:21 +0200857 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858 struct inquiry_entry *e;
859
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300860 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861
Johan Hedberg561aafb2012-01-04 13:31:59 +0200862 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200864 return e;
865 }
866
867 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868}
869
Johan Hedberg561aafb2012-01-04 13:31:59 +0200870struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300871 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200872{
Johan Hedberg30883512012-01-04 14:16:21 +0200873 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200874 struct inquiry_entry *e;
875
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300876 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200877
878 list_for_each_entry(e, &cache->unknown, list) {
879 if (!bacmp(&e->data.bdaddr, bdaddr))
880 return e;
881 }
882
883 return NULL;
884}
885
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200886struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300887 bdaddr_t *bdaddr,
888 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200889{
890 struct discovery_state *cache = &hdev->discovery;
891 struct inquiry_entry *e;
892
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300893 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200894
895 list_for_each_entry(e, &cache->resolve, list) {
896 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
897 return e;
898 if (!bacmp(&e->data.bdaddr, bdaddr))
899 return e;
900 }
901
902 return NULL;
903}
904
Johan Hedberga3d4e202012-01-09 00:53:02 +0200905void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300906 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200907{
908 struct discovery_state *cache = &hdev->discovery;
909 struct list_head *pos = &cache->resolve;
910 struct inquiry_entry *p;
911
912 list_del(&ie->list);
913
914 list_for_each_entry(p, &cache->resolve, list) {
915 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300916 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200917 break;
918 pos = &p->list;
919 }
920
921 list_add(&ie->list, pos);
922}
923
/* Add or refresh an inquiry cache entry for the device in @data.
 *
 * @name_known: caller already knows the remote name (no resolve needed)
 * @ssp: out parameter, set to the remote's SSP mode if non-NULL
 *
 * Returns true when the entry's name is known (or pending) after the
 * update, false when the name still needs resolving or allocation of
 * a new entry failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Any stored OOB data for this address is now stale */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Previously seen with SSP support: keep reporting it
		 * even if this result does not carry the flag. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* A changed RSSI affects the resolve-list ordering */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: take the entry off whichever of the
	 * unknown/resolve lists it was sitting on. */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
981
982static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
983{
Johan Hedberg30883512012-01-04 14:16:21 +0200984 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985 struct inquiry_info *info = (struct inquiry_info *) buf;
986 struct inquiry_entry *e;
987 int copied = 0;
988
Johan Hedberg561aafb2012-01-04 13:31:59 +0200989 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200991
992 if (copied >= num)
993 break;
994
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995 bacpy(&info->bdaddr, &data->bdaddr);
996 info->pscan_rep_mode = data->pscan_rep_mode;
997 info->pscan_period_mode = data->pscan_period_mode;
998 info->pscan_mode = data->pscan_mode;
999 memcpy(info->dev_class, data->dev_class, 3);
1000 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001001
Linus Torvalds1da177e2005-04-16 15:20:36 -07001002 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001003 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004 }
1005
1006 BT_DBG("cache %p, copied %d", cache, copied);
1007 return copied;
1008}
1009
Johan Hedberg42c6b122013-03-05 20:37:49 +02001010static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011{
1012 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001013 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014 struct hci_cp_inquiry cp;
1015
1016 BT_DBG("%s", hdev->name);
1017
1018 if (test_bit(HCI_INQUIRY, &hdev->flags))
1019 return;
1020
1021 /* Start Inquiry */
1022 memcpy(&cp.lap, &ir->lap, 3);
1023 cp.length = ir->length;
1024 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001025 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026}
1027
/* Bit-wait action for wait_on_bit(): sleep until woken and report
 * whether the sleep was interrupted by a signal (non-zero aborts
 * the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1033
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034int hci_inquiry(void __user *arg)
1035{
1036 __u8 __user *ptr = arg;
1037 struct hci_inquiry_req ir;
1038 struct hci_dev *hdev;
1039 int err = 0, do_inquiry = 0, max_rsp;
1040 long timeo;
1041 __u8 *buf;
1042
1043 if (copy_from_user(&ir, ptr, sizeof(ir)))
1044 return -EFAULT;
1045
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001046 hdev = hci_dev_get(ir.dev_id);
1047 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048 return -ENODEV;
1049
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001050 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1051 err = -EBUSY;
1052 goto done;
1053 }
1054
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001055 if (hdev->dev_type != HCI_BREDR) {
1056 err = -EOPNOTSUPP;
1057 goto done;
1058 }
1059
Johan Hedberg56f87902013-10-02 13:43:13 +03001060 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1061 err = -EOPNOTSUPP;
1062 goto done;
1063 }
1064
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001065 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001066 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001067 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001068 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 do_inquiry = 1;
1070 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001071 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072
Marcel Holtmann04837f62006-07-03 10:02:33 +02001073 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001074
1075 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001076 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1077 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001078 if (err < 0)
1079 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001080
1081 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1082 * cleared). If it is interrupted by a signal, return -EINTR.
1083 */
1084 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1085 TASK_INTERRUPTIBLE))
1086 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001087 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001089 /* for unlimited number of responses we will use buffer with
1090 * 255 entries
1091 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1093
1094 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1095 * copy it to the user space.
1096 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001097 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001098 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 err = -ENOMEM;
1100 goto done;
1101 }
1102
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001103 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001104 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001105 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106
1107 BT_DBG("num_rsp %d", ir.num_rsp);
1108
1109 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1110 ptr += sizeof(ir);
1111 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001112 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001114 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115 err = -EFAULT;
1116
1117 kfree(buf);
1118
1119done:
1120 hci_dev_put(hdev);
1121 return err;
1122}
1123
/* Build the LE advertising data payload into @ptr.
 *
 * Emits, in order: the Flags AD field, the TX power level (when
 * valid) and the device name (shortened if it does not fit in the
 * remaining space). @ptr must have room for HCI_MAX_AD_LENGTH bytes.
 *
 * Returns the number of bytes written.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* BR/EDR enabled: advertise simultaneous LE + BR/EDR
		 * support as far as controller and host allow it. */
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		/* AD structure: length (2), type, one flags byte */
		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve two bytes for this field's own AD header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
1181
/* Queue an LE Set Advertising Data command on @req, but only when
 * the freshly built payload differs from what was last programmed
 * into the controller.
 */
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	/* Advertising data only applies to LE capable controllers */
	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Unchanged payload: skip the command entirely */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Remember the full (zero-padded) buffer for the next compare */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1206
/* Power on a controller: open the transport, run the vendor setup
 * callback (first time only) and the HCI init sequence, then mark the
 * device up and notify listeners. On any init failure the transport
 * is torn down again.
 *
 * Returns 0 on success or a negative error code.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (USB/UART/...) via the driver callback */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup runs once, during the initial power on */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and user-channel devices skip HCI init */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1308
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001309/* ---- HCI ioctl helpers ---- */
1310
/* Power on the HCI device with index @dev.
 *
 * Returns 0 on success, -ENODEV if no such device exists, or a
 * negative error code from hci_dev_do_open().
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
1340
/* Power off a controller: cancel pending work, flush queues and
 * connections, optionally reset the controller, close the transport
 * and clear all volatile state. Safe to call on an already-down
 * device (returns 0 after a minimal cleanup).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device came up */
	hci_dev_put(hdev);
	return 0;
}
1437
/* Power off the HCI device with index @dev.
 *
 * Returns 0 on success, -ENODEV if no such device exists, or -EBUSY
 * while the device is exclusively owned by a user channel.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* The device is going down on request, so any pending
	 * automatic power-off work is no longer needed. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1461
/* Reset the HCI device with index @dev: drop queued packets, flush
 * the inquiry cache and connection table, reset the packet counters
 * and (for non-raw devices) issue an HCI Reset command.
 *
 * Returns 0 on success, -ENODEV, -ENETDOWN if the device is not up,
 * -EBUSY while owned by a user channel, or an HCI request error.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and clear per-type packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1506
1507int hci_dev_reset_stat(__u16 dev)
1508{
1509 struct hci_dev *hdev;
1510 int ret = 0;
1511
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001512 hdev = hci_dev_get(dev);
1513 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 return -ENODEV;
1515
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001516 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1517 ret = -EBUSY;
1518 goto done;
1519 }
1520
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1522
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001523done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 return ret;
1526}
1527
/* Handle the legacy HCI device configuration ioctls (HCISETAUTH,
 * HCISETSCAN, HCISETPTYPE, ...).  The request structure is copied in
 * from user space; commands that must reach the controller go through
 * hci_req_sync(), while pure bookkeeping commands update the hci_dev
 * fields directly.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Devices claimed for exclusive user channel access are not
	 * configurable through these ioctls.
	 */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These legacy ioctls only apply to BR/EDR controllers ... */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* ... and only while BR/EDR is actually enabled on them */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* The second 16-bit half of dev_opt carries the MTU,
		 * the first the packet count.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1618
/* HCIGETDEVLIST ioctl helper: copy the ids and flag words of all
 * registered HCI devices to user space.
 *
 * The caller passes the buffer capacity (dev_num) in the first __u16
 * of the user buffer; on return dl->dev_num holds the number of
 * entries actually filled in.  Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the request so the allocation below stays small */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Clear the auto-off flag and cancel any pending
		 * delayed power-off for the enumerated device.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed via mgmt get the legacy
		 * pairable default.
		 */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Only copy back the entries that were filled in */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1665
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for the
 * requested device id and copy it back to user space.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Clear the auto-off flag and cancel any pending delayed
	 * power-off (synchronous cancel; this context may sleep).
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed via mgmt get the legacy pairable default */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble encodes the bus, bits 4-5 the device type */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* Non-BR/EDR controllers report their LE buffer info
		 * through the ACL fields and no SCO support.
		 */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1714
1715/* ---- Interface to HCI drivers ---- */
1716
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001717static int hci_rfkill_set_block(void *data, bool blocked)
1718{
1719 struct hci_dev *hdev = data;
1720
1721 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1722
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001723 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1724 return -EBUSY;
1725
Johan Hedberg5e130362013-09-13 08:58:17 +03001726 if (blocked) {
1727 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001728 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1729 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001730 } else {
1731 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001732 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001733
1734 return 0;
1735}
1736
/* rfkill callbacks registered for every HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1740
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001741static void hci_power_on(struct work_struct *work)
1742{
1743 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001744 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001745
1746 BT_DBG("%s", hdev->name);
1747
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001748 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001749 if (err < 0) {
1750 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001751 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001752 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001753
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001754 /* During the HCI setup phase, a few error conditions are
1755 * ignored and they need to be checked now. If they are still
1756 * valid, it is important to turn the device back off.
1757 */
1758 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1759 (hdev->dev_type == HCI_BREDR &&
1760 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1761 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03001762 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1763 hci_dev_do_close(hdev);
1764 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02001765 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1766 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03001767 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001768
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001769 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001770 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001771}
1772
/* Deferred power-off handler (hdev->power_off delayed work), used
 * e.g. by the auto-off timeout armed in hci_power_on().
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1782
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001783static void hci_discov_off(struct work_struct *work)
1784{
1785 struct hci_dev *hdev;
1786 u8 scan = SCAN_PAGE;
1787
1788 hdev = container_of(work, struct hci_dev, discov_off.work);
1789
1790 BT_DBG("%s", hdev->name);
1791
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001792 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001793
1794 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1795
1796 hdev->discov_timeout = 0;
1797
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001798 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001799}
1800
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001801int hci_uuids_clear(struct hci_dev *hdev)
1802{
Johan Hedberg48210022013-01-27 00:31:28 +02001803 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001804
Johan Hedberg48210022013-01-27 00:31:28 +02001805 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1806 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001807 kfree(uuid);
1808 }
1809
1810 return 0;
1811}
1812
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001813int hci_link_keys_clear(struct hci_dev *hdev)
1814{
1815 struct list_head *p, *n;
1816
1817 list_for_each_safe(p, n, &hdev->link_keys) {
1818 struct link_key *key;
1819
1820 key = list_entry(p, struct link_key, list);
1821
1822 list_del(p);
1823 kfree(key);
1824 }
1825
1826 return 0;
1827}
1828
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001829int hci_smp_ltks_clear(struct hci_dev *hdev)
1830{
1831 struct smp_ltk *k, *tmp;
1832
1833 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1834 list_del(&k->list);
1835 kfree(k);
1836 }
1837
1838 return 0;
1839}
1840
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001841struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1842{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001843 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001844
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001845 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001846 if (bacmp(bdaddr, &k->bdaddr) == 0)
1847 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001848
1849 return NULL;
1850}
1851
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301852static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001853 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001854{
1855 /* Legacy key */
1856 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301857 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001858
1859 /* Debug keys are insecure so don't store them persistently */
1860 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301861 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001862
1863 /* Changed combination key and there's no previous one */
1864 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301865 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001866
1867 /* Security mode 3 case */
1868 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301869 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001870
1871 /* Neither local nor remote side had no-bonding as requirement */
1872 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301873 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001874
1875 /* Local side had dedicated bonding as requirement */
1876 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301877 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001878
1879 /* Remote side had dedicated bonding as requirement */
1880 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301881 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001882
1883 /* If none of the above criteria match, then don't store the key
1884 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301885 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001886}
1887
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001888struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001889{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001890 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001891
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001892 list_for_each_entry(k, &hdev->long_term_keys, list) {
1893 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001894 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001895 continue;
1896
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001897 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001898 }
1899
1900 return NULL;
1901}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001902
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001903struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001904 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001905{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001906 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001907
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001908 list_for_each_entry(k, &hdev->long_term_keys, list)
1909 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001910 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001911 return k;
1912
1913 return NULL;
1914}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001915
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001916int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001917 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001918{
1919 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301920 u8 old_key_type;
1921 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001922
1923 old_key = hci_find_link_key(hdev, bdaddr);
1924 if (old_key) {
1925 old_key_type = old_key->type;
1926 key = old_key;
1927 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001928 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001929 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1930 if (!key)
1931 return -ENOMEM;
1932 list_add(&key->list, &hdev->link_keys);
1933 }
1934
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001935 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001936
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001937 /* Some buggy controller combinations generate a changed
1938 * combination key for legacy pairing even when there's no
1939 * previous key */
1940 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001941 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001942 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001943 if (conn)
1944 conn->key_type = type;
1945 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001946
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001947 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001948 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001949 key->pin_len = pin_len;
1950
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001951 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001952 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001953 else
1954 key->type = type;
1955
Johan Hedberg4df378a2011-04-28 11:29:03 -07001956 if (!new_key)
1957 return 0;
1958
1959 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1960
Johan Hedberg744cf192011-11-08 20:40:14 +02001961 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001962
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301963 if (conn)
1964 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001965
1966 return 0;
1967}
1968
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001969int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001970 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001971 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001972{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001973 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001974
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001975 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1976 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001977
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001978 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1979 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001980 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001981 else {
1982 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001983 if (!key)
1984 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001985 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001986 }
1987
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001988 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001989 key->bdaddr_type = addr_type;
1990 memcpy(key->val, tk, sizeof(key->val));
1991 key->authenticated = authenticated;
1992 key->ediv = ediv;
1993 key->enc_size = enc_size;
1994 key->type = type;
1995 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001996
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001997 if (!new_key)
1998 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001999
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002000 if (type & HCI_SMP_LTK)
2001 mgmt_new_ltk(hdev, key, 1);
2002
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002003 return 0;
2004}
2005
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002006int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2007{
2008 struct link_key *key;
2009
2010 key = hci_find_link_key(hdev, bdaddr);
2011 if (!key)
2012 return -ENOENT;
2013
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002014 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002015
2016 list_del(&key->list);
2017 kfree(key);
2018
2019 return 0;
2020}
2021
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002022int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2023{
2024 struct smp_ltk *k, *tmp;
2025
2026 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2027 if (bacmp(bdaddr, &k->bdaddr))
2028 continue;
2029
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002030 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002031
2032 list_del(&k->list);
2033 kfree(k);
2034 }
2035
2036 return 0;
2037}
2038
Ville Tervo6bd32322011-02-16 16:32:41 +02002039/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002040static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002041{
2042 struct hci_dev *hdev = (void *) arg;
2043
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002044 if (hdev->sent_cmd) {
2045 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2046 u16 opcode = __le16_to_cpu(sent->opcode);
2047
2048 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2049 } else {
2050 BT_ERR("%s command tx timeout", hdev->name);
2051 }
2052
Ville Tervo6bd32322011-02-16 16:32:41 +02002053 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002054 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002055}
2056
Szymon Janc2763eda2011-03-22 13:12:22 +01002057struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002058 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002059{
2060 struct oob_data *data;
2061
2062 list_for_each_entry(data, &hdev->remote_oob_data, list)
2063 if (bacmp(bdaddr, &data->bdaddr) == 0)
2064 return data;
2065
2066 return NULL;
2067}
2068
2069int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2070{
2071 struct oob_data *data;
2072
2073 data = hci_find_remote_oob_data(hdev, bdaddr);
2074 if (!data)
2075 return -ENOENT;
2076
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002077 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002078
2079 list_del(&data->list);
2080 kfree(data);
2081
2082 return 0;
2083}
2084
2085int hci_remote_oob_data_clear(struct hci_dev *hdev)
2086{
2087 struct oob_data *data, *n;
2088
2089 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2090 list_del(&data->list);
2091 kfree(data);
2092 }
2093
2094 return 0;
2095}
2096
/* Store (or refresh) the remote OOB pairing data (hash + randomizer)
 * for bdaddr.  Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* No entry for this address yet - create one */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2120
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002121struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002122{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002123 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002124
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002125 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002126 if (bacmp(bdaddr, &b->bdaddr) == 0)
2127 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002128
2129 return NULL;
2130}
2131
2132int hci_blacklist_clear(struct hci_dev *hdev)
2133{
2134 struct list_head *p, *n;
2135
2136 list_for_each_safe(p, n, &hdev->blacklist) {
2137 struct bdaddr_list *b;
2138
2139 b = list_entry(p, struct bdaddr_list, list);
2140
2141 list_del(p);
2142 kfree(b);
2143 }
2144
2145 return 0;
2146}
2147
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002148int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002149{
2150 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002151
2152 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2153 return -EBADF;
2154
Antti Julku5e762442011-08-25 16:48:02 +03002155 if (hci_blacklist_lookup(hdev, bdaddr))
2156 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002157
2158 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002159 if (!entry)
2160 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002161
2162 bacpy(&entry->bdaddr, bdaddr);
2163
2164 list_add(&entry->list, &hdev->blacklist);
2165
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002166 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002167}
2168
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002169int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002170{
2171 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002172
Szymon Janc1ec918c2011-11-16 09:32:21 +01002173 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002174 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002175
2176 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002177 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002178 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002179
2180 list_del(&entry->list);
2181 kfree(entry);
2182
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002183 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002184}
2185
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002186static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002187{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002188 if (status) {
2189 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002190
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002191 hci_dev_lock(hdev);
2192 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2193 hci_dev_unlock(hdev);
2194 return;
2195 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002196}
2197
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002198static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002199{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002200 /* General inquiry access code (GIAC) */
2201 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2202 struct hci_request req;
2203 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002204 int err;
2205
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002206 if (status) {
2207 BT_ERR("Failed to disable LE scanning: status %d", status);
2208 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002209 }
2210
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002211 switch (hdev->discovery.type) {
2212 case DISCOV_TYPE_LE:
2213 hci_dev_lock(hdev);
2214 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2215 hci_dev_unlock(hdev);
2216 break;
2217
2218 case DISCOV_TYPE_INTERLEAVED:
2219 hci_req_init(&req, hdev);
2220
2221 memset(&cp, 0, sizeof(cp));
2222 memcpy(&cp.lap, lap, sizeof(cp.lap));
2223 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2224 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2225
2226 hci_dev_lock(hdev);
2227
2228 hci_inquiry_cache_flush(hdev);
2229
2230 err = hci_req_run(&req, inquiry_complete);
2231 if (err) {
2232 BT_ERR("Inquiry request failed: err %d", err);
2233 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2234 }
2235
2236 hci_dev_unlock(hdev);
2237 break;
2238 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002239}
2240
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002241static void le_scan_disable_work(struct work_struct *work)
2242{
2243 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002244 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002245 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002246 struct hci_request req;
2247 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002248
2249 BT_DBG("%s", hdev->name);
2250
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002251 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002252
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002253 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002254 cp.enable = LE_SCAN_DISABLE;
2255 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002256
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002257 err = hci_req_run(&req, le_scan_disable_work_complete);
2258 if (err)
2259 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002260}
2261
David Herrmann9be0dab2012-04-22 14:39:57 +02002262/* Alloc HCI device */
2263struct hci_dev *hci_alloc_dev(void)
2264{
2265 struct hci_dev *hdev;
2266
2267 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2268 if (!hdev)
2269 return NULL;
2270
David Herrmannb1b813d2012-04-22 14:39:58 +02002271 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2272 hdev->esco_type = (ESCO_HV1);
2273 hdev->link_mode = (HCI_LM_ACCEPT);
2274 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002275 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2276 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002277
David Herrmannb1b813d2012-04-22 14:39:58 +02002278 hdev->sniff_max_interval = 800;
2279 hdev->sniff_min_interval = 80;
2280
Marcel Holtmannbef64732013-10-11 08:23:19 -07002281 hdev->le_scan_interval = 0x0060;
2282 hdev->le_scan_window = 0x0030;
2283
David Herrmannb1b813d2012-04-22 14:39:58 +02002284 mutex_init(&hdev->lock);
2285 mutex_init(&hdev->req_lock);
2286
2287 INIT_LIST_HEAD(&hdev->mgmt_pending);
2288 INIT_LIST_HEAD(&hdev->blacklist);
2289 INIT_LIST_HEAD(&hdev->uuids);
2290 INIT_LIST_HEAD(&hdev->link_keys);
2291 INIT_LIST_HEAD(&hdev->long_term_keys);
2292 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002293 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002294
2295 INIT_WORK(&hdev->rx_work, hci_rx_work);
2296 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2297 INIT_WORK(&hdev->tx_work, hci_tx_work);
2298 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002299
David Herrmannb1b813d2012-04-22 14:39:58 +02002300 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2301 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2302 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2303
David Herrmannb1b813d2012-04-22 14:39:58 +02002304 skb_queue_head_init(&hdev->rx_q);
2305 skb_queue_head_init(&hdev->cmd_q);
2306 skb_queue_head_init(&hdev->raw_q);
2307
2308 init_waitqueue_head(&hdev->req_wait_q);
2309
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002310 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002311
David Herrmannb1b813d2012-04-22 14:39:58 +02002312 hci_init_sysfs(hdev);
2313 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002314
2315 return hdev;
2316}
2317EXPORT_SYMBOL(hci_alloc_dev);
2318
/* Free HCI device
 *
 * Drops the reference taken at allocation time; the actual kfree() happens
 * in the embedded device's release callback (presumably installed by
 * hci_init_sysfs() — confirm), once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2326
/* Register HCI device
 *
 * Allocates an index, creates the per-device workqueues and sysfs entry,
 * registers rfkill, publishes the device on hci_dev_list and schedules the
 * initial power-on. Returns the new device index (>= 0) on success or a
 * negative errno; on failure all partially acquired resources are released.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A transport driver must supply at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Single-threaded, high-priority queues so HCI processing keeps
	 * strict ordering per device.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best effort: registration failure leaves the device
	 * usable, just without rfkill support.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2419
/* Unregister HCI device
 *
 * Reverses hci_register_dev(): unpublishes the device, shuts it down,
 * tears down rfkill/sysfs/workqueues, clears all persistent per-device
 * state and drops the registration reference. The teardown order matters:
 * the device is removed from the global list before hci_dev_do_close()
 * so no new users can find it while it is being closed.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Cache the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt if userspace ever saw the index */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush stored keys, UUIDs and filter entries */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2478
/* Suspend HCI device
 *
 * Broadcasts HCI_DEV_SUSPEND to registered notifier consumers; no other
 * state is touched here. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2486
/* Resume HCI device
 *
 * Broadcasts HCI_DEV_RESUME to registered notifier consumers; mirror of
 * hci_suspend_dev(). Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2494
Marcel Holtmann76bca882009-11-18 00:40:39 +01002495/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002496int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002497{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002498 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002499 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002500 kfree_skb(skb);
2501 return -ENXIO;
2502 }
2503
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002504 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002505 bt_cb(skb)->incoming = 1;
2506
2507 /* Time stamp */
2508 __net_timestamp(skb);
2509
Marcel Holtmann76bca882009-11-18 00:40:39 +01002510 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002511 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002512
Marcel Holtmann76bca882009-11-18 00:40:39 +01002513 return 0;
2514}
2515EXPORT_SYMBOL(hci_recv_frame);
2516
/* Incrementally reassemble one HCI packet of @type from a byte stream.
 *
 * Copies up to @count bytes from @data into the per-device reassembly
 * slot @index. A new skb is allocated when the slot is empty; once the
 * packet header is complete, the expected payload length is read from it
 * and, when the whole packet has arrived, it is handed to
 * hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno (-EILSEQ for bad type/index, -ENOMEM on allocation failure or a
 * payload larger than the preallocated buffer).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start a new packet: allocate the maximum possible size
		 * for this packet type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Track progress in the skb control block */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once exactly the header has been received, read the
		 * payload length from it. Reject payloads that would not
		 * fit the preallocated buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2624
Marcel Holtmannef222012007-07-11 06:42:04 +02002625int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2626{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302627 int rem = 0;
2628
Marcel Holtmannef222012007-07-11 06:42:04 +02002629 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2630 return -EILSEQ;
2631
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002632 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002633 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302634 if (rem < 0)
2635 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002636
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302637 data += (count - rem);
2638 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002639 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002640
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302641 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002642}
2643EXPORT_SYMBOL(hci_recv_fragment);
2644
/* Slot used for stream (UART-style) reassembly; the packet type is
 * carried in-band as the first byte of each frame, so one slot suffices. */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (packet-type byte followed by the packet) into
 * the reassembler. Used by drivers whose transport delivers an untyped
 * stream. Returns the number of bytes left unconsumed (>= 0) or a
 * negative errno from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Skip the consumed bytes and continue with the rest */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2679
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680/* ---- Interface to upper protocols ---- */
2681
/* Register an upper-protocol callback structure on the global hci_cb_list.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2693
/* Remove a previously registered callback structure from hci_cb_list.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2705
/* Hand one outgoing frame to the transport driver.
 *
 * Time stamps the skb, mirrors it to the monitor channel (and to raw
 * sockets when in promiscuous mode), then calls the driver's send hook.
 * Ownership of @skb passes to the driver; a failed send is only logged.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
2727
Johan Hedberg3119ae92013-03-05 20:37:44 +02002728void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2729{
2730 skb_queue_head_init(&req->cmd_q);
2731 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002732 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002733}
2734
/* Submit a built request: splice its queued commands onto the device
 * command queue and kick the command worker. @complete is attached to
 * the last command and runs when the whole request finishes.
 *
 * Returns 0 on success, the deferred build error if one occurred, or
 * -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback is carried by the last command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Atomically append the whole batch to the device command queue */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2766
/* Build an skb containing one HCI command: header (opcode + length)
 * followed by @plen bytes of @param. Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Opcode goes out little-endian per the HCI wire format */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
2791
/* Send HCI command
 *
 * Builds a stand-alone command skb and queues it on the device command
 * queue for the command worker to transmit. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816
/* Queue a command to an asynchronous HCI request
 *
 * Appends one command to @req's private queue; nothing is transmitted
 * until hci_req_run(). @event selects a specific completion event for
 * this command (0 means the default Command Complete/Status handling).
 * On allocation failure the error is latched in req->err and surfaced
 * later by hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2847
/* Queue a command to a request with default event handling; convenience
 * wrapper around hci_req_add_ev() with event == 0. */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2853
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002855void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856{
2857 struct hci_command_hdr *hdr;
2858
2859 if (!hdev->sent_cmd)
2860 return NULL;
2861
2862 hdr = (void *) hdev->sent_cmd->data;
2863
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002864 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865 return NULL;
2866
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002867 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868
2869 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2870}
2871
/* Send ACL data */

/* Prepend an ACL header (handle + packet-boundary/broadcast flags and
 * payload length) in front of the current skb data. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Handle and flags share one little-endian 16-bit field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2884
/* Add ACL headers to @skb (and its frag_list fragments, if any) and put
 * the result on @queue. For AMP controllers the channel handle is used
 * in the header instead of the connection handle. Fragments after the
 * first are flagged ACL_CONT and the whole chain is queued atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Reduce skb to its head; fragments are handled via frag_list */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2942
/* Queue ACL data on the channel's data queue and schedule the TX worker.
 * Takes ownership of @skb.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953
/* Send SCO data
 *
 * Prepends a SCO header (connection handle + length) and queues the skb
 * on the connection's data queue for the TX worker. Takes ownership of
 * @skb. NOTE(review): hdr.dlen is narrower than skb->len — callers
 * presumably never exceed the SCO MTU; confirm.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974
2975/* ---- HCI TX task (outgoing data) ---- */
2976
2977/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * outstanding (sent but unacknowledged) packets, and compute its fair
 * share of the available controller buffers in *@quote.
 *
 * Returns the chosen connection or NULL (then *quote is 0). The list is
 * walked under RCU; the selection spreads the per-link-type buffer
 * budget evenly across ready connections, with a minimum quote of 1.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only ready connections of the right type with queued data */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-loaded candidate */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffer count for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Shared ACL buffers when there is no LE buffer pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3037
/* Handle a TX timeout for links of @type: disconnect every connection of
 * that type that still has unacknowledged packets, on the assumption the
 * link has stalled. Walked under RCU.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3058
/* Pick the best channel of the given link type to service next.
 *
 * Scans every connected link of @type and, among channels with queued
 * data, selects one whose head packet has the highest priority seen;
 * ties are broken in favour of the connection with the fewest unacked
 * packets (fairness). On success *quote is set to the number of
 * packets the channel may send now: an equal share of the free
 * controller buffers, but always at least one.
 *
 * Returns the chosen channel, or NULL if nothing is queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Ignore channels below the best priority found */
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness counters */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities prefer the least busy link */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once every connection of this type has been seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffer count for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE shares the ACL pool without dedicated LE buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the available buffers evenly, but always allow one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3140
/* After packets were sent on links of @type, refresh channel state:
 * clear the per-round sent counter of channels that transmitted, and
 * promote the priority of the head packet on channels that stayed
 * idle so their traffic is not starved indefinitely.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset it */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Cap promotion at HCI_PRIO_MAX - 1 */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop once every connection of this type has been seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3190
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003191static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3192{
3193 /* Calculate count of blocks used by this packet */
3194 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3195}
3196
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003197static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199 if (!test_bit(HCI_RAW, &hdev->flags)) {
3200 /* ACL tx timeout must be longer than maximum
3201 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003202 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003203 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003204 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003206}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207
/* Send queued ACL data using packet-based flow control: while the
 * controller has free ACL buffers, pick the most deserving channel
 * via hci_chan_sent() and drain up to its quota of packets. Draining
 * a channel stops early if the priority of its head packet drops
 * below the priority the drain started with.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Disconnect stalled links if TX has timed out */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Leave sniff mode before transmitting if requested */
			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3245
/* Send queued ACL data using block-based flow control (AMP-style
 * controllers): like hci_sched_acl_pkt() but buffer accounting is
 * done in controller data blocks (block_cnt) instead of packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	/* Disconnect stalled links if TX has timed out */
	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links, BR/EDR ones ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Packet no longer fits in the remaining blocks.
			 * NOTE(review): the dequeued skb is neither requeued
			 * nor freed on this path — looks like a leak; confirm
			 * against upstream history before changing.
			 */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* If anything was sent, rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3299
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003300static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003301{
3302 BT_DBG("%s", hdev->name);
3303
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003304 /* No ACL link over BR/EDR controller */
3305 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3306 return;
3307
3308 /* No AMP link over AMP controller */
3309 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003310 return;
3311
3312 switch (hdev->flow_ctl_mode) {
3313 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3314 hci_sched_acl_pkt(hdev);
3315 break;
3316
3317 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3318 hci_sched_acl_blk(hdev);
3319 break;
3320 }
3321}
3322
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003324static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325{
3326 struct hci_conn *conn;
3327 struct sk_buff *skb;
3328 int quote;
3329
3330 BT_DBG("%s", hdev->name);
3331
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003332 if (!hci_conn_num(hdev, SCO_LINK))
3333 return;
3334
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3336 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3337 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003338 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339
3340 conn->sent++;
3341 if (conn->sent == ~0)
3342 conn->sent = 0;
3343 }
3344 }
3345}
3346
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003347static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003348{
3349 struct hci_conn *conn;
3350 struct sk_buff *skb;
3351 int quote;
3352
3353 BT_DBG("%s", hdev->name);
3354
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003355 if (!hci_conn_num(hdev, ESCO_LINK))
3356 return;
3357
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003358 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3359 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003360 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3361 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003362 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003363
3364 conn->sent++;
3365 if (conn->sent == ~0)
3366 conn->sent = 0;
3367 }
3368 }
3369}
3370
/* Send queued LE data. Controllers without dedicated LE buffers
 * (le_pkts == 0) share the ACL buffer pool; the spent buffer count
 * is written back to whichever pool was used. Otherwise mirrors
 * hci_sched_acl_pkt().
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Fall back to the ACL pool without dedicated LE buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining buffer count back to the pool used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3421
/* TX work callback: runs all per-link-type schedulers (skipped in
 * user channel mode, where a userspace owner drives the device
 * directly) and then flushes the raw packet queue.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
3442
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003443/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444
3445/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003446static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447{
3448 struct hci_acl_hdr *hdr = (void *) skb->data;
3449 struct hci_conn *conn;
3450 __u16 handle, flags;
3451
3452 skb_pull(skb, HCI_ACL_HDR_SIZE);
3453
3454 handle = __le16_to_cpu(hdr->handle);
3455 flags = hci_flags(handle);
3456 handle = hci_handle(handle);
3457
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003458 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003459 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003460
3461 hdev->stat.acl_rx++;
3462
3463 hci_dev_lock(hdev);
3464 conn = hci_conn_hash_lookup_handle(hdev, handle);
3465 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003466
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003468 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003469
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003471 l2cap_recv_acldata(conn, skb, flags);
3472 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003474 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003475 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 }
3477
3478 kfree_skb(skb);
3479}
3480
3481/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003482static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483{
3484 struct hci_sco_hdr *hdr = (void *) skb->data;
3485 struct hci_conn *conn;
3486 __u16 handle;
3487
3488 skb_pull(skb, HCI_SCO_HDR_SIZE);
3489
3490 handle = __le16_to_cpu(hdr->handle);
3491
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003492 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493
3494 hdev->stat.sco_rx++;
3495
3496 hci_dev_lock(hdev);
3497 conn = hci_conn_hash_lookup_handle(hdev, handle);
3498 hci_dev_unlock(hdev);
3499
3500 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003502 sco_recv_scodata(conn, skb);
3503 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003505 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003506 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 }
3508
3509 kfree_skb(skb);
3510}
3511
Johan Hedberg9238f362013-03-05 20:37:48 +02003512static bool hci_req_is_complete(struct hci_dev *hdev)
3513{
3514 struct sk_buff *skb;
3515
3516 skb = skb_peek(&hdev->cmd_q);
3517 if (!skb)
3518 return true;
3519
3520 return bt_cb(skb)->req.start;
3521}
3522
Johan Hedberg42c6b122013-03-05 20:37:49 +02003523static void hci_resend_last(struct hci_dev *hdev)
3524{
3525 struct hci_command_hdr *sent;
3526 struct sk_buff *skb;
3527 u16 opcode;
3528
3529 if (!hdev->sent_cmd)
3530 return;
3531
3532 sent = (void *) hdev->sent_cmd->data;
3533 opcode = __le16_to_cpu(sent->opcode);
3534 if (opcode == HCI_OP_RESET)
3535 return;
3536
3537 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3538 if (!skb)
3539 return;
3540
3541 skb_queue_head(&hdev->cmd_q, skb);
3542 queue_work(hdev->workqueue, &hdev->cmd_work);
3543}
3544
/* Called on command status/complete events: decide whether the
 * request the completed command belongs to has finished and, if so,
 * invoke its completion callback exactly once. On failure (or when
 * the request ended), all remaining commands of the same request are
 * flushed from the command queue.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Hit the start of the next request: put it back, stop */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Remember the last callback seen among flushed commands */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3610
/* RX work callback: drains the receive queue, forwarding each packet
 * to the monitor (and, in promiscuous mode, to raw sockets) before
 * dispatching it to the event, ACL or SCO handler by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw or user channel mode the host stack does not
		 * process packets itself */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
3666
/* CMD work callback: sends the next queued HCI command when the
 * controller can accept one (cmd_cnt > 0). A clone of the command is
 * kept in hdev->sent_cmd for completion matching, and the command
 * timeout timer is (re)armed — except during an HCI reset, where it
 * is cancelled instead.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003698
Andre Guedes31f79562012-04-24 21:02:53 -03003699u8 bdaddr_to_le(u8 bdaddr_type)
3700{
3701 switch (bdaddr_type) {
3702 case BDADDR_LE_PUBLIC:
3703 return ADDR_LE_DEV_PUBLIC;
3704
3705 default:
3706 /* Fallback to LE Random address type */
3707 return ADDR_LE_DEV_RANDOM;
3708 }
3709}