/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

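/* Added description, based on the body below: consume the last received
 * HCI event (hdev->recv_evt) on behalf of the synchronous command
 * helpers. The skb is returned only if it matches the requested @event,
 * or, when @event is 0, if it is a Command Complete for @opcode;
 * anything else is freed and ERR_PTR(-ENODATA) is returned.
 */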
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

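/* Added description, based on the body below: send a single HCI command
 * and block until the matching completion event arrives or @timeout (in
 * jiffies) expires. hci_req_sync_complete() wakes the waiter through
 * req_wait_q, and the resulting event skb is handed back to the caller.
 * Sleeps, so this must run in process context.
 */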
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

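/* Added description, based on the body below: locked wrapper around
 * __hci_req_sync(). All synchronous requests are serialized on
 * hci_req_lock, and requests against a device that is not up fail
 * early with -ENETDOWN.
 */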
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

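/* Added description, based on the body below: pick the Inquiry mode to
 * configure: 0x02 (extended), 0x01 (with RSSI) or 0x00 (standard),
 * based on the advertised LMP features. The explicit manufacturer and
 * revision checks appear to whitelist controllers that support Inquiry
 * Result with RSSI without advertising the feature bit.
 */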
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

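/* Added description, based on the body below: build the 8 byte Set Event
 * Mask parameter. Each bit enables delivery of one HCI event; bits for
 * optional events are only set when the corresponding LMP feature is
 * supported, so the controller never sends events the host cannot use.
 */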
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

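/* Added description, based on the body below: controller initialization
 * runs as sequential stages. Stage one resets the controller and reads
 * basic info, and is all that AMP controllers get; stages two to four
 * only run for HCI_BREDR devices and progressively configure features
 * discovered in the earlier stages.
 */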
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

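/* Added description, based on the body below: re-insert @ie into the
 * resolve list, which is kept ordered by descending signal strength
 * (entries with a pending name request stay in front), so that names
 * are resolved for the strongest, and thus likely closest, devices
 * first.
 */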
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

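/* Added description, based on the body below: handler for the HCIINQUIRY
 * ioctl. Runs an inquiry if the cache is stale or a flush was requested,
 * waits for it to finish, then copies up to ir.num_rsp cached results
 * back to user space.
 */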
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

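/* Added description, based on the body below: fill @ptr with LE
 * advertising data as a sequence of length/type/value structures: an
 * optional Flags field, an optional TX Power field, and the local name,
 * shortened when it does not fit in the remaining space. Returns the
 * number of bytes written.
 */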
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

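/* Added description, based on the body below: bring up a controller.
 * Calls the driver's open(), runs the vendor setup callback while still
 * in the HCI_SETUP phase, then performs the staged HCI init (unless the
 * device is raw or bound to a user channel). On failure the work and
 * command queues are flushed and the driver is closed again.
 */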
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Check for rfkill but allow the HCI setup stage to proceed
	 * (which in itself doesn't cause any RF activity).
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

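/* Added description, based on the body below: tear down a controller.
 * Flushes pending work, drops timers and queues, optionally sends
 * HCI_Reset on close, and only then calls the driver's close(), so no
 * work item can touch the hardware after close() has run.
 */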
1279static int hci_dev_do_close(struct hci_dev *hdev)
1280{
1281 BT_DBG("%s %p", hdev->name, hdev);
1282
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001283 cancel_delayed_work(&hdev->power_off);
1284
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 hci_req_cancel(hdev, ENODEV);
1286 hci_req_lock(hdev);
1287
1288 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001289 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 hci_req_unlock(hdev);
1291 return 0;
1292 }
1293
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001294 /* Flush RX and TX works */
1295 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001296 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001298 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001299 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001300 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001301 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001302 }
1303
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001304 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001305 cancel_delayed_work(&hdev->service_cache);
1306
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001307 cancel_delayed_work_sync(&hdev->le_scan_disable);
1308
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001309 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001310 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001312 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
1314 hci_notify(hdev, HCI_DEV_DOWN);
1315
1316 if (hdev->flush)
1317 hdev->flush(hdev);
1318
1319 /* Reset device */
1320 skb_queue_purge(&hdev->cmd_q);
1321 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001322 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001323 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001325 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 clear_bit(HCI_INIT, &hdev->flags);
1327 }
1328
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001329 /* flush cmd work */
1330 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331
1332 /* Drop queues */
1333 skb_queue_purge(&hdev->rx_q);
1334 skb_queue_purge(&hdev->cmd_q);
1335 skb_queue_purge(&hdev->raw_q);
1336
1337 /* Drop last sent command */
1338 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001339 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 kfree_skb(hdev->sent_cmd);
1341 hdev->sent_cmd = NULL;
1342 }
1343
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001344 kfree_skb(hdev->recv_evt);
1345 hdev->recv_evt = NULL;
1346
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 /* After this point our queues are empty
1348 * and no tasks are scheduled. */
1349 hdev->close(hdev);
1350
Johan Hedberg35b973c2013-03-15 17:06:59 -05001351 /* Clear flags */
1352 hdev->flags = 0;
1353 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1354
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001355 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1356 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001357 hci_dev_lock(hdev);
1358 mgmt_powered(hdev, 0);
1359 hci_dev_unlock(hdev);
1360 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001361
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001362 /* Controller radio is available but is currently powered down */
1363 hdev->amp_status = 0;
1364
Johan Hedberge59fda82012-02-22 18:11:53 +02001365 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001366 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001367
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368 hci_req_unlock(hdev);
1369
1370 hci_dev_put(hdev);
1371 return 0;
1372}
1373
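/* ioctl entry point for powering a controller down. Devices bound to
 * a user channel are refused with -EBUSY; otherwise any pending auto
 * power-off work is cancelled and hci_dev_do_close() performs the
 * actual shutdown.
 */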
1374int hci_dev_close(__u16 dev)
1375{
1376 struct hci_dev *hdev;
1377 int err;
1378
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001379 hdev = hci_dev_get(dev);
1380 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001382
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001383 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1384 err = -EBUSY;
1385 goto done;
1386 }
1387
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001388 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1389 cancel_delayed_work(&hdev->power_off);
1390
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001392
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001393done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 hci_dev_put(hdev);
1395 return err;
1396}
1397
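/* Reset a running controller: purge the RX and command queues, flush
 * the inquiry cache and connection hash, and issue HCI_Reset unless
 * the device runs in raw mode.
 */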
1398int hci_dev_reset(__u16 dev)
1399{
1400 struct hci_dev *hdev;
1401 int ret = 0;
1402
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001403 hdev = hci_dev_get(dev);
1404 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 return -ENODEV;
1406
1407 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
Marcel Holtmann808a0492013-08-26 20:57:58 -07001409 if (!test_bit(HCI_UP, &hdev->flags)) {
1410 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001412 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001414 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1415 ret = -EBUSY;
1416 goto done;
1417 }
1418
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 /* Drop queues */
1420 skb_queue_purge(&hdev->rx_q);
1421 skb_queue_purge(&hdev->cmd_q);
1422
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001423 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001424 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001426 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427
1428 if (hdev->flush)
1429 hdev->flush(hdev);
1430
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001431 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001432	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433
1434 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001435 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
1437done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 hci_req_unlock(hdev);
1439 hci_dev_put(hdev);
1440 return ret;
1441}
1442
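/* Clear a device's accumulated transfer statistics (hdev->stat). */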
1443int hci_dev_reset_stat(__u16 dev)
1444{
1445 struct hci_dev *hdev;
1446 int ret = 0;
1447
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001448 hdev = hci_dev_get(dev);
1449 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 return -ENODEV;
1451
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001452 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1453 ret = -EBUSY;
1454 goto done;
1455 }
1456
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1458
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001459done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 return ret;
1462}
1463
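/* Handle the HCISET* family of ioctls, tuning controller parameters
 * either through synchronous HCI requests (auth, encrypt, scan, link
 * policy) or by updating cached values (packet types, MTUs).
 */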
1464int hci_dev_cmd(unsigned int cmd, void __user *arg)
1465{
1466 struct hci_dev *hdev;
1467 struct hci_dev_req dr;
1468 int err = 0;
1469
1470 if (copy_from_user(&dr, arg, sizeof(dr)))
1471 return -EFAULT;
1472
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001473 hdev = hci_dev_get(dr.dev_id);
1474 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 return -ENODEV;
1476
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001477 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1478 err = -EBUSY;
1479 goto done;
1480 }
1481
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 switch (cmd) {
1483 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001484 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1485 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 break;
1487
1488 case HCISETENCRYPT:
1489 if (!lmp_encrypt_capable(hdev)) {
1490 err = -EOPNOTSUPP;
1491 break;
1492 }
1493
1494 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1495 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001496 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1497 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 if (err)
1499 break;
1500 }
1501
Johan Hedberg01178cd2013-03-05 20:37:41 +02001502 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1503 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 break;
1505
1506 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001507 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1508 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 break;
1510
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001511 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001512 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1513 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001514 break;
1515
1516 case HCISETLINKMODE:
1517 hdev->link_mode = ((__u16) dr.dev_opt) &
1518 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1519 break;
1520
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 case HCISETPTYPE:
1522 hdev->pkt_type = (__u16) dr.dev_opt;
1523 break;
1524
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001526 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1527 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 break;
1529
1530 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001531 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1532 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 break;
1534
1535 default:
1536 err = -EINVAL;
1537 break;
1538 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001539
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001540done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 hci_dev_put(hdev);
1542 return err;
1543}
1544
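/* Copy a snapshot of the registered controllers (id and flags) to
 * user space.
 */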
1545int hci_get_dev_list(void __user *arg)
1546{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001547 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 struct hci_dev_list_req *dl;
1549 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 int n = 0, size, err;
1551 __u16 dev_num;
1552
1553 if (get_user(dev_num, (__u16 __user *) arg))
1554 return -EFAULT;
1555
1556 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1557 return -EINVAL;
1558
1559 size = sizeof(*dl) + dev_num * sizeof(*dr);
1560
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001561 dl = kzalloc(size, GFP_KERNEL);
1562 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 return -ENOMEM;
1564
1565 dr = dl->dev_req;
1566
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001567 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001568 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001569 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001570 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001571
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001572 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1573 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001574
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 (dr + n)->dev_id = hdev->id;
1576 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001577
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 if (++n >= dev_num)
1579 break;
1580 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001581 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
1583 dl->dev_num = n;
1584 size = sizeof(*dl) + n * sizeof(*dr);
1585
1586 err = copy_to_user(arg, dl, size);
1587 kfree(dl);
1588
1589 return err ? -EFAULT : 0;
1590}
1591
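/* Fill a hci_dev_info structure for one controller and copy it to
 * user space; LE-only controllers report their LE buffer settings
 * through the ACL fields.
 */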
1592int hci_get_dev_info(void __user *arg)
1593{
1594 struct hci_dev *hdev;
1595 struct hci_dev_info di;
1596 int err = 0;
1597
1598 if (copy_from_user(&di, arg, sizeof(di)))
1599 return -EFAULT;
1600
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001601 hdev = hci_dev_get(di.dev_id);
1602 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 return -ENODEV;
1604
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001605 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001606 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001607
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001608 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1609 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001610
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 strcpy(di.name, hdev->name);
1612 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001613 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 di.flags = hdev->flags;
1615 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001616 if (lmp_bredr_capable(hdev)) {
1617 di.acl_mtu = hdev->acl_mtu;
1618 di.acl_pkts = hdev->acl_pkts;
1619 di.sco_mtu = hdev->sco_mtu;
1620 di.sco_pkts = hdev->sco_pkts;
1621 } else {
1622 di.acl_mtu = hdev->le_mtu;
1623 di.acl_pkts = hdev->le_pkts;
1624 di.sco_mtu = 0;
1625 di.sco_pkts = 0;
1626 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 di.link_policy = hdev->link_policy;
1628 di.link_mode = hdev->link_mode;
1629
1630 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1631 memcpy(&di.features, &hdev->features, sizeof(di.features));
1632
1633 if (copy_to_user(arg, &di, sizeof(di)))
1634 err = -EFAULT;
1635
1636 hci_dev_put(hdev);
1637
1638 return err;
1639}
1640
1641/* ---- Interface to HCI drivers ---- */
1642
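/* rfkill callback: blocking the radio closes the device unless it is
 * still in setup, while devices bound to a user channel cannot be
 * toggled from here.
 */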
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001643static int hci_rfkill_set_block(void *data, bool blocked)
1644{
1645 struct hci_dev *hdev = data;
1646
1647 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1648
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001649 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1650 return -EBUSY;
1651
Johan Hedberg5e130362013-09-13 08:58:17 +03001652 if (blocked) {
1653 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001654 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1655 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001656 } else {
1657 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001658 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001659
1660 return 0;
1661}
1662
1663static const struct rfkill_ops hci_rfkill_ops = {
1664 .set_block = hci_rfkill_set_block,
1665};
1666
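/* Worker that powers a controller on. Open failures are reported to
 * the management interface, rfkilled devices are closed again, and
 * devices brought up by the auto-power policy get an automatic
 * power-off scheduled.
 */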
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001667static void hci_power_on(struct work_struct *work)
1668{
1669 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001670 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001671
1672 BT_DBG("%s", hdev->name);
1673
Johan Hedberg96570ff2013-05-29 09:51:29 +03001674 err = hci_dev_open(hdev->id);
1675 if (err < 0) {
1676 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001677 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001678 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001679
Johan Hedbergbf543032013-09-13 08:58:18 +03001680 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1681 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1682 hci_dev_do_close(hdev);
1683 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02001684 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1685 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03001686 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001687
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001688 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001689 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001690}
1691
1692static void hci_power_off(struct work_struct *work)
1693{
Johan Hedberg32435532011-11-07 22:16:04 +02001694 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001695 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001696
1697 BT_DBG("%s", hdev->name);
1698
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001699 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001700}
1701
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001702static void hci_discov_off(struct work_struct *work)
1703{
1704 struct hci_dev *hdev;
1705 u8 scan = SCAN_PAGE;
1706
1707 hdev = container_of(work, struct hci_dev, discov_off.work);
1708
1709 BT_DBG("%s", hdev->name);
1710
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001711 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001712
1713 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1714
1715 hdev->discov_timeout = 0;
1716
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001717 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001718}
1719
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001720int hci_uuids_clear(struct hci_dev *hdev)
1721{
Johan Hedberg48210022013-01-27 00:31:28 +02001722 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001723
Johan Hedberg48210022013-01-27 00:31:28 +02001724 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1725 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001726 kfree(uuid);
1727 }
1728
1729 return 0;
1730}
1731
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001732int hci_link_keys_clear(struct hci_dev *hdev)
1733{
1734 struct list_head *p, *n;
1735
1736 list_for_each_safe(p, n, &hdev->link_keys) {
1737 struct link_key *key;
1738
1739 key = list_entry(p, struct link_key, list);
1740
1741 list_del(p);
1742 kfree(key);
1743 }
1744
1745 return 0;
1746}
1747
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001748int hci_smp_ltks_clear(struct hci_dev *hdev)
1749{
1750 struct smp_ltk *k, *tmp;
1751
1752 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1753 list_del(&k->list);
1754 kfree(k);
1755 }
1756
1757 return 0;
1758}
1759
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001760struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1761{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001762 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001763
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001764 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001765 if (bacmp(bdaddr, &k->bdaddr) == 0)
1766 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001767
1768 return NULL;
1769}
1770
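/* Decide whether a new link key should be stored persistently, based
 * on the key type and the bonding requirements negotiated by both
 * sides.
 */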
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301771static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001772 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001773{
1774 /* Legacy key */
1775 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301776 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001777
1778 /* Debug keys are insecure so don't store them persistently */
1779 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301780 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001781
1782 /* Changed combination key and there's no previous one */
1783 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301784 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001785
1786 /* Security mode 3 case */
1787 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301788 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001789
1790 /* Neither local nor remote side had no-bonding as requirement */
1791 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301792 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001793
1794 /* Local side had dedicated bonding as requirement */
1795 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301796 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001797
1798 /* Remote side had dedicated bonding as requirement */
1799 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301800 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001801
1802 /* If none of the above criteria match, then don't store the key
1803 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301804 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001805}
1806
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001807struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001808{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001809 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001810
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001811 list_for_each_entry(k, &hdev->long_term_keys, list) {
1812 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001813 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001814 continue;
1815
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001816 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001817 }
1818
1819 return NULL;
1820}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001821
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001822struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001823 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001824{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001825 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001826
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001827 list_for_each_entry(k, &hdev->long_term_keys, list)
1828 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001829 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001830 return k;
1831
1832 return NULL;
1833}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001834
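/* Store or update a BR/EDR link key. Bogus "changed combination" keys
 * from buggy controllers are normalized to plain combination keys,
 * and for new keys mgmt is told whether the key should persist.
 */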
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001835int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001836 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001837{
1838 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301839 u8 old_key_type;
1840 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001841
1842 old_key = hci_find_link_key(hdev, bdaddr);
1843 if (old_key) {
1844 old_key_type = old_key->type;
1845 key = old_key;
1846 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001847 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001848 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1849 if (!key)
1850 return -ENOMEM;
1851 list_add(&key->list, &hdev->link_keys);
1852 }
1853
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001854 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001855
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001856 /* Some buggy controller combinations generate a changed
1857 * combination key for legacy pairing even when there's no
1858 * previous key */
1859 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001860 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001861 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001862 if (conn)
1863 conn->key_type = type;
1864 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001865
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001866 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001867 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001868 key->pin_len = pin_len;
1869
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001870 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001871 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001872 else
1873 key->type = type;
1874
Johan Hedberg4df378a2011-04-28 11:29:03 -07001875 if (!new_key)
1876 return 0;
1877
1878 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1879
Johan Hedberg744cf192011-11-08 20:40:14 +02001880 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001881
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301882 if (conn)
1883 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001884
1885 return 0;
1886}
1887
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001888int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001889 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001890 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001891{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001892 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001893
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001894 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1895 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001896
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001897 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1898 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001899 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001900 else {
1901 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001902 if (!key)
1903 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001904 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001905 }
1906
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001907 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001908 key->bdaddr_type = addr_type;
1909 memcpy(key->val, tk, sizeof(key->val));
1910 key->authenticated = authenticated;
1911 key->ediv = ediv;
1912 key->enc_size = enc_size;
1913 key->type = type;
1914 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001915
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001916 if (!new_key)
1917 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001918
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001919 if (type & HCI_SMP_LTK)
1920 mgmt_new_ltk(hdev, key, 1);
1921
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001922 return 0;
1923}
1924
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001925int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1926{
1927 struct link_key *key;
1928
1929 key = hci_find_link_key(hdev, bdaddr);
1930 if (!key)
1931 return -ENOENT;
1932
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001933 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001934
1935 list_del(&key->list);
1936 kfree(key);
1937
1938 return 0;
1939}
1940
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001941int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1942{
1943 struct smp_ltk *k, *tmp;
1944
1945 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1946 if (bacmp(bdaddr, &k->bdaddr))
1947 continue;
1948
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001949 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001950
1951 list_del(&k->list);
1952 kfree(k);
1953 }
1954
1955 return 0;
1956}
1957
Ville Tervo6bd32322011-02-16 16:32:41 +02001958/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001959static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001960{
1961 struct hci_dev *hdev = (void *) arg;
1962
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001963 if (hdev->sent_cmd) {
1964 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1965 u16 opcode = __le16_to_cpu(sent->opcode);
1966
1967 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1968 } else {
1969 BT_ERR("%s command tx timeout", hdev->name);
1970 }
1971
Ville Tervo6bd32322011-02-16 16:32:41 +02001972 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001973 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001974}
1975
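/* Look up stored out-of-band pairing data (hash and randomizer) for a
 * remote address.
 */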
Szymon Janc2763eda2011-03-22 13:12:22 +01001976struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001977 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001978{
1979 struct oob_data *data;
1980
1981 list_for_each_entry(data, &hdev->remote_oob_data, list)
1982 if (bacmp(bdaddr, &data->bdaddr) == 0)
1983 return data;
1984
1985 return NULL;
1986}
1987
1988int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1989{
1990 struct oob_data *data;
1991
1992 data = hci_find_remote_oob_data(hdev, bdaddr);
1993 if (!data)
1994 return -ENOENT;
1995
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001996 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001997
1998 list_del(&data->list);
1999 kfree(data);
2000
2001 return 0;
2002}
2003
2004int hci_remote_oob_data_clear(struct hci_dev *hdev)
2005{
2006 struct oob_data *data, *n;
2007
2008 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2009 list_del(&data->list);
2010 kfree(data);
2011 }
2012
2013 return 0;
2014}
2015
2016int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002017 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002018{
2019 struct oob_data *data;
2020
2021 data = hci_find_remote_oob_data(hdev, bdaddr);
2022
2023 if (!data) {
2024 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2025 if (!data)
2026 return -ENOMEM;
2027
2028 bacpy(&data->bdaddr, bdaddr);
2029 list_add(&data->list, &hdev->remote_oob_data);
2030 }
2031
2032 memcpy(data->hash, hash, sizeof(data->hash));
2033 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2034
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002035 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002036
2037 return 0;
2038}
2039
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002040struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002041{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002042 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002043
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002044 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002045 if (bacmp(bdaddr, &b->bdaddr) == 0)
2046 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002047
2048 return NULL;
2049}
2050
2051int hci_blacklist_clear(struct hci_dev *hdev)
2052{
2053 struct list_head *p, *n;
2054
2055 list_for_each_safe(p, n, &hdev->blacklist) {
2056 struct bdaddr_list *b;
2057
2058 b = list_entry(p, struct bdaddr_list, list);
2059
2060 list_del(p);
2061 kfree(b);
2062 }
2063
2064 return 0;
2065}
2066
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002067int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002068{
2069 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002070
2071 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2072 return -EBADF;
2073
Antti Julku5e762442011-08-25 16:48:02 +03002074 if (hci_blacklist_lookup(hdev, bdaddr))
2075 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002076
2077 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002078 if (!entry)
2079 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002080
2081 bacpy(&entry->bdaddr, bdaddr);
2082
2083 list_add(&entry->list, &hdev->blacklist);
2084
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002085 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002086}
2087
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002088int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002089{
2090 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002091
Szymon Janc1ec918c2011-11-16 09:32:21 +01002092 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002093 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002094
2095 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002096 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002097 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002098
2099 list_del(&entry->list);
2100 kfree(entry);
2101
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002102 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002103}
2104
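/* Completion handler for the BR/EDR inquiry issued when interleaved
 * discovery continues after LE scanning has been disabled.
 */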
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002105static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002106{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002107 if (status) {
2108 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002109
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002110 hci_dev_lock(hdev);
2111 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2112 hci_dev_unlock(hdev);
2113 return;
2114 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002115}
2116
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002117static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002118{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002119 /* General inquiry access code (GIAC) */
2120 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2121 struct hci_request req;
2122 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002123 int err;
2124
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002125 if (status) {
2126 BT_ERR("Failed to disable LE scanning: status %d", status);
2127 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002128 }
2129
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002130 switch (hdev->discovery.type) {
2131 case DISCOV_TYPE_LE:
2132 hci_dev_lock(hdev);
2133 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2134 hci_dev_unlock(hdev);
2135 break;
2136
2137 case DISCOV_TYPE_INTERLEAVED:
2138 hci_req_init(&req, hdev);
2139
2140 memset(&cp, 0, sizeof(cp));
2141 memcpy(&cp.lap, lap, sizeof(cp.lap));
2142 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2143 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2144
2145 hci_dev_lock(hdev);
2146
2147 hci_inquiry_cache_flush(hdev);
2148
2149 err = hci_req_run(&req, inquiry_complete);
2150 if (err) {
2151 BT_ERR("Inquiry request failed: err %d", err);
2152 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2153 }
2154
2155 hci_dev_unlock(hdev);
2156 break;
2157 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002158}
2159
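/* Worker that asks the controller to stop LE scanning; the follow-up
 * handling (ending discovery or starting the inquiry phase) happens
 * in le_scan_disable_work_complete().
 */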
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002160static void le_scan_disable_work(struct work_struct *work)
2161{
2162 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002163 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002164 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002165 struct hci_request req;
2166 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002167
2168 BT_DBG("%s", hdev->name);
2169
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002170 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002171
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002172 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002173 cp.enable = LE_SCAN_DISABLE;
2174 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002175
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002176 err = hci_req_run(&req, le_scan_disable_work_complete);
2177 if (err)
2178 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002179}
2180
David Herrmann9be0dab2012-04-22 14:39:57 +02002181/* Alloc HCI device */
2182struct hci_dev *hci_alloc_dev(void)
2183{
2184 struct hci_dev *hdev;
2185
2186 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2187 if (!hdev)
2188 return NULL;
2189
David Herrmannb1b813d2012-04-22 14:39:58 +02002190 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2191 hdev->esco_type = (ESCO_HV1);
2192 hdev->link_mode = (HCI_LM_ACCEPT);
2193 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002194 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2195 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002196
David Herrmannb1b813d2012-04-22 14:39:58 +02002197 hdev->sniff_max_interval = 800;
2198 hdev->sniff_min_interval = 80;
2199
2200 mutex_init(&hdev->lock);
2201 mutex_init(&hdev->req_lock);
2202
2203 INIT_LIST_HEAD(&hdev->mgmt_pending);
2204 INIT_LIST_HEAD(&hdev->blacklist);
2205 INIT_LIST_HEAD(&hdev->uuids);
2206 INIT_LIST_HEAD(&hdev->link_keys);
2207 INIT_LIST_HEAD(&hdev->long_term_keys);
2208 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002209 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002210
2211 INIT_WORK(&hdev->rx_work, hci_rx_work);
2212 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2213 INIT_WORK(&hdev->tx_work, hci_tx_work);
2214 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002215
David Herrmannb1b813d2012-04-22 14:39:58 +02002216 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2217 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2218 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2219
David Herrmannb1b813d2012-04-22 14:39:58 +02002220 skb_queue_head_init(&hdev->rx_q);
2221 skb_queue_head_init(&hdev->cmd_q);
2222 skb_queue_head_init(&hdev->raw_q);
2223
2224 init_waitqueue_head(&hdev->req_wait_q);
2225
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002226 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002227
David Herrmannb1b813d2012-04-22 14:39:58 +02002228 hci_init_sysfs(hdev);
2229 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002230
2231 return hdev;
2232}
2233EXPORT_SYMBOL(hci_alloc_dev);
2234
2235/* Free HCI device */
2236void hci_free_dev(struct hci_dev *hdev)
2237{
David Herrmann9be0dab2012-04-22 14:39:57 +02002238 /* will free via device release */
2239 put_device(&hdev->dev);
2240}
2241EXPORT_SYMBOL(hci_free_dev);
2242
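/* A minimal sketch of how a transport driver is expected to use the
 * registration API below (illustrative only: my_open(), my_close()
 * and my_send() are hypothetical driver callbacks, not part of this
 * file):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */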
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243/* Register HCI device */
2244int hci_register_dev(struct hci_dev *hdev)
2245{
David Herrmannb1b813d2012-04-22 14:39:58 +02002246 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
David Herrmann010666a2012-01-07 15:47:07 +01002248 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 return -EINVAL;
2250
Mat Martineau08add512011-11-02 16:18:36 -07002251 /* Do not allow HCI_AMP devices to register at index 0,
2252 * so the index can be used as the AMP controller ID.
2253 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002254 switch (hdev->dev_type) {
2255 case HCI_BREDR:
2256 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2257 break;
2258 case HCI_AMP:
2259 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2260 break;
2261 default:
2262 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002264
Sasha Levin3df92b32012-05-27 22:36:56 +02002265 if (id < 0)
2266 return id;
2267
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 sprintf(hdev->name, "hci%d", id);
2269 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002270
2271 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2272
Kees Cookd8537542013-07-03 15:04:57 -07002273 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2274 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002275 if (!hdev->workqueue) {
2276 error = -ENOMEM;
2277 goto err;
2278 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002279
Kees Cookd8537542013-07-03 15:04:57 -07002280 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2281 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002282 if (!hdev->req_workqueue) {
2283 destroy_workqueue(hdev->workqueue);
2284 error = -ENOMEM;
2285 goto err;
2286 }
2287
David Herrmann33ca9542011-10-08 14:58:49 +02002288 error = hci_add_sysfs(hdev);
2289 if (error < 0)
2290 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002292 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002293 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2294 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002295 if (hdev->rfkill) {
2296 if (rfkill_register(hdev->rfkill) < 0) {
2297 rfkill_destroy(hdev->rfkill);
2298 hdev->rfkill = NULL;
2299 }
2300 }
2301
Johan Hedberg5e130362013-09-13 08:58:17 +03002302 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2303 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2304
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002305 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002306
2307 if (hdev->dev_type != HCI_AMP)
2308 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2309
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002310 write_lock(&hci_dev_list_lock);
2311 list_add(&hdev->list, &hci_dev_list);
2312 write_unlock(&hci_dev_list_lock);
2313
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002315 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316
Johan Hedberg19202572013-01-14 22:33:51 +02002317 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002318
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002320
David Herrmann33ca9542011-10-08 14:58:49 +02002321err_wqueue:
2322 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002323 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002324err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002325 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002326
David Herrmann33ca9542011-10-08 14:58:49 +02002327 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328}
2329EXPORT_SYMBOL(hci_register_dev);
2330
2331/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002332void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333{
Sasha Levin3df92b32012-05-27 22:36:56 +02002334 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002335
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002336 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337
Johan Hovold94324962012-03-15 14:48:41 +01002338 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2339
Sasha Levin3df92b32012-05-27 22:36:56 +02002340 id = hdev->id;
2341
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002342 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002344 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345
2346 hci_dev_do_close(hdev);
2347
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302348 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002349 kfree_skb(hdev->reassembly[i]);
2350
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002351 cancel_work_sync(&hdev->power_on);
2352
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002353 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002354 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002355 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002356 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002357 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002358 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002359
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002360 /* mgmt_index_removed should take care of emptying the
2361 * pending list */
2362 BUG_ON(!list_empty(&hdev->mgmt_pending));
2363
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 hci_notify(hdev, HCI_DEV_UNREG);
2365
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002366 if (hdev->rfkill) {
2367 rfkill_unregister(hdev->rfkill);
2368 rfkill_destroy(hdev->rfkill);
2369 }
2370
David Herrmannce242972011-10-08 14:58:48 +02002371 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002372
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002373 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002374 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002375
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002376 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002377 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002378 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002379 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002380 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002381 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002382 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002383
David Herrmanndc946bd2012-01-07 15:47:24 +01002384 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002385
2386 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387}
2388EXPORT_SYMBOL(hci_unregister_dev);
2389
2390/* Suspend HCI device */
2391int hci_suspend_dev(struct hci_dev *hdev)
2392{
2393 hci_notify(hdev, HCI_DEV_SUSPEND);
2394 return 0;
2395}
2396EXPORT_SYMBOL(hci_suspend_dev);
2397
2398/* Resume HCI device */
2399int hci_resume_dev(struct hci_dev *hdev)
2400{
2401 hci_notify(hdev, HCI_DEV_RESUME);
2402 return 0;
2403}
2404EXPORT_SYMBOL(hci_resume_dev);
2405
Marcel Holtmann76bca882009-11-18 00:40:39 +01002406/* Receive frame from HCI drivers */
2407int hci_recv_frame(struct sk_buff *skb)
2408{
2409 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2410 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002411 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002412 kfree_skb(skb);
2413 return -ENXIO;
2414 }
2415
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002416 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002417 bt_cb(skb)->incoming = 1;
2418
2419 /* Time stamp */
2420 __net_timestamp(skb);
2421
Marcel Holtmann76bca882009-11-18 00:40:39 +01002422 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002423 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002424
Marcel Holtmann76bca882009-11-18 00:40:39 +01002425 return 0;
2426}
2427EXPORT_SYMBOL(hci_recv_frame);
2428
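/* Feed raw bytes into the per-type reassembly buffer. Returns how
 * many input bytes are left unconsumed (a complete frame is handed
 * to hci_recv_frame() as soon as it is assembled), or a negative
 * error.
 */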
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302429static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002430 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302431{
2432 int len = 0;
2433 int hlen = 0;
2434 int remain = count;
2435 struct sk_buff *skb;
2436 struct bt_skb_cb *scb;
2437
2438 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002439 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302440 return -EILSEQ;
2441
2442 skb = hdev->reassembly[index];
2443
2444 if (!skb) {
2445 switch (type) {
2446 case HCI_ACLDATA_PKT:
2447 len = HCI_MAX_FRAME_SIZE;
2448 hlen = HCI_ACL_HDR_SIZE;
2449 break;
2450 case HCI_EVENT_PKT:
2451 len = HCI_MAX_EVENT_SIZE;
2452 hlen = HCI_EVENT_HDR_SIZE;
2453 break;
2454 case HCI_SCODATA_PKT:
2455 len = HCI_MAX_SCO_SIZE;
2456 hlen = HCI_SCO_HDR_SIZE;
2457 break;
2458 }
2459
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002460 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302461 if (!skb)
2462 return -ENOMEM;
2463
2464 scb = (void *) skb->cb;
2465 scb->expect = hlen;
2466 scb->pkt_type = type;
2467
2468 skb->dev = (void *) hdev;
2469 hdev->reassembly[index] = skb;
2470 }
2471
2472 while (count) {
2473 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002474 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302475
2476 memcpy(skb_put(skb, len), data, len);
2477
2478 count -= len;
2479 data += len;
2480 scb->expect -= len;
2481 remain = count;
2482
2483 switch (type) {
2484 case HCI_EVENT_PKT:
2485 if (skb->len == HCI_EVENT_HDR_SIZE) {
2486 struct hci_event_hdr *h = hci_event_hdr(skb);
2487 scb->expect = h->plen;
2488
2489 if (skb_tailroom(skb) < scb->expect) {
2490 kfree_skb(skb);
2491 hdev->reassembly[index] = NULL;
2492 return -ENOMEM;
2493 }
2494 }
2495 break;
2496
2497 case HCI_ACLDATA_PKT:
2498 if (skb->len == HCI_ACL_HDR_SIZE) {
2499 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2500 scb->expect = __le16_to_cpu(h->dlen);
2501
2502 if (skb_tailroom(skb) < scb->expect) {
2503 kfree_skb(skb);
2504 hdev->reassembly[index] = NULL;
2505 return -ENOMEM;
2506 }
2507 }
2508 break;
2509
2510 case HCI_SCODATA_PKT:
2511 if (skb->len == HCI_SCO_HDR_SIZE) {
2512 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2513 scb->expect = h->dlen;
2514
2515 if (skb_tailroom(skb) < scb->expect) {
2516 kfree_skb(skb);
2517 hdev->reassembly[index] = NULL;
2518 return -ENOMEM;
2519 }
2520 }
2521 break;
2522 }
2523
2524 if (scb->expect == 0) {
2525 /* Complete frame */
2526
2527 bt_cb(skb)->pkt_type = type;
2528 hci_recv_frame(skb);
2529
2530 hdev->reassembly[index] = NULL;
2531 return remain;
2532 }
2533 }
2534
2535 return remain;
2536}
2537
Marcel Holtmannef222012007-07-11 06:42:04 +02002538int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2539{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302540 int rem = 0;
2541
Marcel Holtmannef222012007-07-11 06:42:04 +02002542 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2543 return -EILSEQ;
2544
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002545 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002546 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302547 if (rem < 0)
2548 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002549
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302550 data += (count - rem);
2551 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002552 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002553
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302554 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002555}
2556EXPORT_SYMBOL(hci_recv_fragment);
2557
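/* Stream transports (e.g. UART) deliver bytes without packet
 * boundaries: the leading packet type indicator is parsed out of the
 * stream and a single reassembly slot tracks the frame in progress.
 */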
Suraj Sumangala99811512010-07-14 13:02:19 +05302558#define STREAM_REASSEMBLY 0
2559
2560int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2561{
2562 int type;
2563 int rem = 0;
2564
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002565 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302566 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2567
2568 if (!skb) {
2569 struct { char type; } *pkt;
2570
2571 /* Start of the frame */
2572 pkt = data;
2573 type = pkt->type;
2574
2575 data++;
2576 count--;
2577		} else {
2578			type = bt_cb(skb)->pkt_type;
		}
2579
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002580 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002581 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302582 if (rem < 0)
2583 return rem;
2584
2585 data += (count - rem);
2586 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002587 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302588
2589 return rem;
2590}
2591EXPORT_SYMBOL(hci_recv_stream_fragment);
2592
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593/* ---- Interface to upper protocols ---- */
2594
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595int hci_register_cb(struct hci_cb *cb)
2596{
2597 BT_DBG("%p name %s", cb, cb->name);
2598
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002599 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002601 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602
2603 return 0;
2604}
2605EXPORT_SYMBOL(hci_register_cb);
2606
2607int hci_unregister_cb(struct hci_cb *cb)
2608{
2609 BT_DBG("%p name %s", cb, cb->name);
2610
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002611 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002613 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
2615 return 0;
2616}
2617EXPORT_SYMBOL(hci_unregister_cb);
2618
2619static int hci_send_frame(struct sk_buff *skb)
2620{
2621 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2622
2623 if (!hdev) {
2624 kfree_skb(skb);
2625 return -ENODEV;
2626 }
2627
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002628 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002630 /* Time stamp */
2631 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002633 /* Send copy to monitor */
2634 hci_send_to_monitor(hdev, skb);
2635
2636 if (atomic_read(&hdev->promisc)) {
2637 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002638 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 }
2640
2641 /* Get rid of skb owner, prior to sending to the driver. */
2642 skb_orphan(skb);
2643
2644 return hdev->send(skb);
2645}
2646
Johan Hedberg3119ae92013-03-05 20:37:44 +02002647void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2648{
2649 skb_queue_head_init(&req->cmd_q);
2650 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002651 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002652}
2653
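/* Submit all commands queued on an asynchronous request. The complete
 * callback is attached to the last command and fires once the whole
 * sequence has been processed.
 */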
2654int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2655{
2656 struct hci_dev *hdev = req->hdev;
2657 struct sk_buff *skb;
2658 unsigned long flags;
2659
2660 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2661
Andre Guedes5d73e032013-03-08 11:20:16 -03002662	/* If an error occurred during request building, remove all HCI
2663 * commands queued on the HCI request queue.
2664 */
2665 if (req->err) {
2666 skb_queue_purge(&req->cmd_q);
2667 return req->err;
2668 }
2669
Johan Hedberg3119ae92013-03-05 20:37:44 +02002670 /* Do not allow empty requests */
2671 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002672 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002673
2674 skb = skb_peek_tail(&req->cmd_q);
2675 bt_cb(skb)->req.complete = complete;
2676
2677 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2678 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2679 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2680
2681 queue_work(hdev->workqueue, &hdev->cmd_work);
2682
2683 return 0;
2684}
2685
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002686static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002687 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688{
2689 int len = HCI_COMMAND_HDR_SIZE + plen;
2690 struct hci_command_hdr *hdr;
2691 struct sk_buff *skb;
2692
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002694 if (!skb)
2695 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696
2697 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002698 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699 hdr->plen = plen;
2700
2701 if (plen)
2702 memcpy(skb_put(skb, plen), param, plen);
2703
2704 BT_DBG("skb len %d", skb->len);
2705
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002706 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002708
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002709 return skb;
2710}
2711
2712/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002713int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2714 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002715{
2716 struct sk_buff *skb;
2717
2718 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2719
2720 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2721 if (!skb) {
2722 BT_ERR("%s no memory for command", hdev->name);
2723 return -ENOMEM;
2724 }
2725
Johan Hedberg11714b32013-03-05 20:37:47 +02002726 /* Stand-alone HCI commands must be flaged as
2727 * single-command requests.
2728 */
2729 bt_cb(skb)->req.start = true;
2730
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002732 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733
2734 return 0;
2735}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736
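/* Example (illustrative): sending a parameterless command such as
 * Read Local Version Information:
 *
 *	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 */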
Johan Hedberg71c76a12013-03-05 20:37:46 +02002737/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002738void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2739 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002740{
2741 struct hci_dev *hdev = req->hdev;
2742 struct sk_buff *skb;
2743
2744 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2745
Andre Guedes34739c12013-03-08 11:20:18 -03002746	/* If an error occurred during request building, there is no point in
2747 * queueing the HCI command. We can simply return.
2748 */
2749 if (req->err)
2750 return;
2751
Johan Hedberg71c76a12013-03-05 20:37:46 +02002752 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2753 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002754 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2755 hdev->name, opcode);
2756 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002757 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002758 }
2759
2760 if (skb_queue_empty(&req->cmd_q))
2761 bt_cb(skb)->req.start = true;
2762
Johan Hedberg02350a72013-04-03 21:50:29 +03002763 bt_cb(skb)->req.event = event;
2764
Johan Hedberg71c76a12013-03-05 20:37:46 +02002765 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002766}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

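/* Typical usage of the request API (a sketch; reset_complete_cb is a
 * hypothetical completion callback):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	hci_req_run(&req, reset_complete_cb);
 */
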
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

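/* Add the ACL header (connection handle for BR/EDR, channel handle for
 * AMP) and queue the skb; fragments hanging off frag_list are queued
 * atomically as ACL continuation packets.
 */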
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

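/* Entry point for outgoing ACL data: queue the skb on its channel and
 * kick the TX work.
 */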
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
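/* Select the connection of the given link type that has queued data
 * and the fewest packets in flight, and compute its quote: an even
 * share of the free controller buffers, but at least one.
 */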
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

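/* TX timeout handling: forcibly disconnect every connection of the
 * given type that still has unacked frames outstanding.
 */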
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

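/* Channel-aware variant of the scheduler: only channels whose head skb
 * carries the highest pending priority compete, and among those the
 * one on the connection with the fewest packets in flight wins.
 */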
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

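/* Anti-starvation pass run after a scheduling round: channels that got
 * to send have their counter reset, while channels that sent nothing
 * get their head skb promoted to HCI_PRIO_MAX - 1.
 */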
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

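/* Watchdog for the ACL link: trigger the TX timeout handling when no
 * controller buffers are free and the last TX was too long ago.
 */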
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

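/* Packet-based ACL scheduling: each sent frame consumes one controller
 * buffer; drain channels by priority until the buffers or the queued
 * data run out.
 */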
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

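/* Block-based ACL scheduling: the same loop as the packet-based path,
 * except that buffer accounting is in data blocks and one frame may
 * consume several blocks.
 */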
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

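/* Schedule LE traffic; controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL buffers instead.
 */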
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

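/* TX work item: run the per-link-type schedulers (skipped while the
 * device is in user channel mode) and then flush queued raw packets.
 */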
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

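/* The current request is complete when the command queue is empty or
 * when the next queued command starts a new request.
 */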
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

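/* Put a clone of the last sent command (unless it was a reset) back at
 * the head of the command queue and reschedule the command work.
 */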
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

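/* Called from event processing when a command completes: work out
 * whether its whole request has finished, run the request's completion
 * callback once and, on failure, drop the commands that remain from
 * that request.
 */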
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

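/* RX work item: hand every received frame to the monitor (and, in
 * promiscuous mode, to raw sockets), then dispatch it to the event,
 * ACL data or SCO data handler.
 */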
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

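/* Command work item: if the controller has a free command slot, send
 * the next queued command, keep a clone in hdev->sent_cmd for
 * completion matching and arm the command timeout timer.
 */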
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}