/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
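
/* The two helpers above form the completion side of the synchronous
 * request machinery used throughout this file: a waiter sets
 * hdev->req_status to HCI_REQ_PEND and sleeps on hdev->req_wait_q;
 * whoever completes or cancels the request stores the outcome in
 * hdev->req_result, flips req_status to HCI_REQ_DONE or
 * HCI_REQ_CANCELED, and wakes the waiter.
 */
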
81
Fengguang Wu77a63e02013-04-20 16:24:31 +030082static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +030084{
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
87 struct sk_buff *skb;
88
89 hci_dev_lock(hdev);
90
91 skb = hdev->recv_evt;
92 hdev->recv_evt = NULL;
93
94 hci_dev_unlock(hdev);
95
96 if (!skb)
97 return ERR_PTR(-ENODATA);
98
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
101 goto failed;
102 }
103
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
106
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300107 if (event) {
108 if (hdr->evt != event)
109 goto failed;
110 return skb;
111 }
112
Johan Hedberg75e84b72013-04-02 13:35:04 +0300113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 goto failed;
116 }
117
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
120 goto failed;
121 }
122
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
125
126 if (opcode == __le16_to_cpu(ev->opcode))
127 return skb;
128
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
131
132failed:
133 kfree_skb(skb);
134 return ERR_PTR(-ENODATA);
135}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

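/* Usage sketch (illustrative only, not taken from this file): a driver
 * setup callback could issue a synchronous command and parse the
 * Command Complete parameters from the returned skb, for example:
 *
 *      struct hci_rp_read_local_version *rp;
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      rp = (struct hci_rp_read_local_version *) skb->data;
 *      ...
 *      kfree_skb(skb);
 *
 * The caller owns the returned skb and must free it with kfree_skb().
 */
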
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

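/* The values returned above map onto the Write Inquiry Mode command
 * parameter: 0x00 standard inquiry results, 0x01 results with RSSI,
 * 0x02 results with RSSI or the extended format. The manufacturer,
 * hci_rev and lmp_subver checks appear to be workarounds for
 * controllers that deliver RSSI inquiry results without advertising
 * the capability in their feature mask.
 */
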
static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

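/* Note on ordering (a reading aid, not part of the original source):
 * each init stage depends on state populated by the previous one.
 * Stage 2 keys off the LMP features read in stage 1, stage 3 keys off
 * the supported-commands bitmap and extended features read in stage 2,
 * and stage 4 keys off the page 2 features read in stage 3 -- which is
 * why the stages run as separate synchronous requests rather than one
 * batched request.
 */
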
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

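/* The discovery cache keeps every result on the "all" list and, in
 * addition, threads entries whose remote name is still unknown onto
 * "unknown" and entries queued for name resolution onto "resolve"
 * (kept sorted by RSSI in hci_inquiry_cache_update_resolve() below).
 * The lookup helpers that follow each search one of these lists.
 */
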
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses we use a buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

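/* Layout reminder (a reading aid, not from the original source): LE
 * advertising data is a sequence of AD structures, each encoded as one
 * length octet (covering type plus payload), one AD type octet and the
 * payload. That is why the flags and TX power fields above each occupy
 * three octets, and why the whole buffer is capped at HCI_MAX_AD_LENGTH
 * (31) octets, forcing the device name to be truncated and tagged
 * EIR_NAME_SHORT when it does not fit.
 */
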
void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        /* Check for rfkill but allow the HCI setup stage to proceed
         * (which in itself doesn't cause any RF activity).
         */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        err = hci_dev_do_open(hdev);

        hci_dev_put(hdev);

        return err;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001283static int hci_dev_do_close(struct hci_dev *hdev)
1284{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

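/* Power down a controller on behalf of the HCIDEVDOWN path: resolve the
 * device index, reject devices bound to a user channel, cancel a pending
 * auto-power-off and then call hci_dev_do_close() above.
 */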
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

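/* Soft-reset a running controller: drop the RX and command queues,
 * flush the inquiry cache and connection hash, and issue HCI_Reset
 * unless the device is in raw mode.
 */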
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

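/* Handle the device-control ioctls (HCISETAUTH, HCISETSCAN, etc.) that
 * take a struct hci_dev_req argument; most of them run a synchronous
 * HCI request against the controller.
 */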
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

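/* HCIGETDEVLIST: copy the ids and flags of up to dev_num registered
 * controllers to user space.
 */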
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

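/* rfkill hook: a soft block closes the controller unless it is still
 * being set up; unblocking only clears HCI_RFKILLED, it does not power
 * the device back on.
 */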
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

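/* Deferred power-on work: open the device, back off again if rfkill
 * kicked in meanwhile, and schedule an automatic power-off for
 * controllers that were only brought up for the HCI_AUTO_OFF grace
 * period.
 */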
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

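/* The helpers below maintain the per-controller lists of UUIDs, BR/EDR
 * link keys and SMP long term keys; they are called with hdev->lock
 * held (see hci_unregister_dev() below).
 */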
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

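/* Decide whether a newly distributed link key should survive a power
 * cycle: legacy keys always do, debug keys never do, and combination
 * keys are kept only when at least one side required bonding.
 */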
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

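/* Store (or update) a BR/EDR link key. Buggy-controller workarounds
 * aside, the interesting part is the hci_persistent_key() decision,
 * which is reported to the management interface and, via
 * conn->flush_key, controls whether the key is dropped on disconnect.
 */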
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

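/* Remote out-of-band pairing data (hash and randomizer received over an
 * external channel) is kept per remote address in hdev->remote_oob_data;
 * the helpers below look up, add, remove and clear entries.
 */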
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

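/* The blacklist holds remote addresses whose connection attempts the
 * core should refuse; entries are added and removed on behalf of the
 * management interface, which is notified of every change.
 */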
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

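/* Completion handlers for the LE scan disable request: once scanning
 * has stopped, either discovery is finished (pure LE discovery) or an
 * inquiry is started to cover BR/EDR in the interleaved case.
 */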
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

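/* Illustrative sketch (not code from any actual driver): a transport
 * driver pairs hci_alloc_dev()/hci_register_dev() roughly like this,
 * where my_open, my_close, my_flush and my_send are hypothetical
 * driver callbacks:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */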
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

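/* Reassemble one HCI packet from a byte stream: allocate an skb sized
 * for the packet type on the first fragment, accumulate data until the
 * header-announced length is complete, then hand the skb to
 * hci_recv_frame(). Returns the number of bytes left unconsumed, or a
 * negative error.
 */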
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

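/* Hand one outgoing frame to the driver: timestamp it, mirror it to the
 * monitor and (in promiscuous mode) to raw sockets, detach the skb from
 * its owning socket and pass it to hdev->send().
 */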
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

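/* Asynchronous HCI requests: commands queued on a struct hci_request
 * via hci_req_add() are spliced onto hdev->cmd_q in one go by
 * hci_req_run(), with the completion callback attached to the last
 * command of the batch.
 */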
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

Johan Hedberg71c76a12013-03-05 20:37:46 +02002741/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002742void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2743 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002744{
2745 struct hci_dev *hdev = req->hdev;
2746 struct sk_buff *skb;
2747
2748 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2749
Andre Guedes34739c12013-03-08 11:20:18 -03002750 /* If an error occured during request building, there is no point in
2751 * queueing the HCI command. We can simply return.
2752 */
2753 if (req->err)
2754 return;
2755
Johan Hedberg71c76a12013-03-05 20:37:46 +02002756 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2757 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002758 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2759 hdev->name, opcode);
2760 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002761 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002762 }
2763
2764 if (skb_queue_empty(&req->cmd_q))
2765 bt_cb(skb)->req.start = true;
2766
Johan Hedberg02350a72013-04-03 21:50:29 +03002767 bt_cb(skb)->req.event = event;
2768
Johan Hedberg71c76a12013-03-05 20:37:46 +02002769 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002770}
2771
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002772void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2773 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002774{
2775 hci_req_add_ev(req, opcode, plen, param, 0);
2776}
2777
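/* Illustrative use of the request API above (a sketch; complete_cb stands
 * for a caller-supplied hci_req_complete_t and is not defined here):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	hci_req_run(&req, complete_cb);
 */
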
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002779void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780{
2781 struct hci_command_hdr *hdr;
2782
2783 if (!hdev->sent_cmd)
2784 return NULL;
2785
2786 hdr = (void *) hdev->sent_cmd->data;
2787
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002788 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 return NULL;
2790
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002791 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792
2793 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2794}
2795
2796/* Send ACL data */
2797static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2798{
2799 struct hci_acl_hdr *hdr;
2800 int len = skb->len;
2801
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002802 skb_push(skb, HCI_ACL_HDR_SIZE);
2803 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002804 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002805 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2806 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807}
2808
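/* Add ACL headers to every fragment of an skb and queue all fragments
 * atomically on the given queue.
 */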
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002809static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002810 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002812 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 struct hci_dev *hdev = conn->hdev;
2814 struct sk_buff *list;
2815
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002816 skb->len = skb_headlen(skb);
2817 skb->data_len = 0;
2818
2819 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002820
2821 switch (hdev->dev_type) {
2822 case HCI_BREDR:
2823 hci_add_acl_hdr(skb, conn->handle, flags);
2824 break;
2825 case HCI_AMP:
2826 hci_add_acl_hdr(skb, chan->handle, flags);
2827 break;
2828 default:
2829 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2830 return;
2831 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002832
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002833 list = skb_shinfo(skb)->frag_list;
2834 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835		/* Non-fragmented */
2836 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2837
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002838 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 } else {
2840 /* Fragmented */
2841 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2842
2843 skb_shinfo(skb)->frag_list = NULL;
2844
2845 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002846 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002848 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002849
2850 flags &= ~ACL_START;
2851 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 do {
 2853			skb = list;
			list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002854
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002856 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002857 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858
2859 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2860
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002861 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 } while (list);
2863
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002864 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002866}
2867
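/* Queue ACL data on its channel and schedule the TX work */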
2868void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2869{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002870 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002871
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002872 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002873
2874 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002875
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002876 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002878 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880
2881/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002882void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883{
2884 struct hci_dev *hdev = conn->hdev;
2885 struct hci_sco_hdr hdr;
2886
2887 BT_DBG("%s len %d", hdev->name, skb->len);
2888
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002889 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 hdr.dlen = skb->len;
2891
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002892 skb_push(skb, HCI_SCO_HDR_SIZE);
2893 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002894 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895
2896 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002897 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002898
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002900 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902
2903/* ---- HCI TX task (outgoing data) ---- */
2904
2905/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002906static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2907 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908{
2909 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002910 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002911 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002913	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002915
2916 rcu_read_lock();
2917
2918 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002919 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002921
2922 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2923 continue;
2924
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 num++;
2926
2927 if (c->sent < min) {
2928 min = c->sent;
2929 conn = c;
2930 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002931
2932 if (hci_conn_num(hdev, type) == num)
2933 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 }
2935
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002936 rcu_read_unlock();
2937
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002939 int cnt, q;
2940
2941 switch (conn->type) {
2942 case ACL_LINK:
2943 cnt = hdev->acl_cnt;
2944 break;
2945 case SCO_LINK:
2946 case ESCO_LINK:
2947 cnt = hdev->sco_cnt;
2948 break;
2949 case LE_LINK:
2950 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2951 break;
2952 default:
2953 cnt = 0;
2954 BT_ERR("Unknown link type");
2955 }
2956
2957 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958 *quote = q ? q : 1;
2959 } else
2960 *quote = 0;
2961
2962 BT_DBG("conn %p quote %d", conn, *quote);
2963 return conn;
2964}
2965
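/* Link TX timeout: forcefully disconnect every connection of the given
 * type that still has unacknowledged packets.
 */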
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002966static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967{
2968 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002969 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970
Ville Tervobae1f5d92011-02-10 22:38:53 -03002971 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002973 rcu_read_lock();
2974
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002976 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002977 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002978 BT_ERR("%s killing stalled connection %pMR",
2979 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03002980 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981 }
2982 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002983
2984 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985}
2986
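/* Channel scheduler: among channels of the given link type, pick the one
 * whose queued data has the highest priority and whose connection has the
 * fewest packets in flight; *quote receives its share of the buffers.
 */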
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002987static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2988 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002989{
2990 struct hci_conn_hash *h = &hdev->conn_hash;
2991 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02002992 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002993 struct hci_conn *conn;
2994 int cnt, q, conn_num = 0;
2995
2996 BT_DBG("%s", hdev->name);
2997
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002998 rcu_read_lock();
2999
3000 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003001 struct hci_chan *tmp;
3002
3003 if (conn->type != type)
3004 continue;
3005
3006 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3007 continue;
3008
3009 conn_num++;
3010
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003011 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003012 struct sk_buff *skb;
3013
3014 if (skb_queue_empty(&tmp->data_q))
3015 continue;
3016
3017 skb = skb_peek(&tmp->data_q);
3018 if (skb->priority < cur_prio)
3019 continue;
3020
3021 if (skb->priority > cur_prio) {
3022 num = 0;
3023 min = ~0;
3024 cur_prio = skb->priority;
3025 }
3026
3027 num++;
3028
3029 if (conn->sent < min) {
3030 min = conn->sent;
3031 chan = tmp;
3032 }
3033 }
3034
3035 if (hci_conn_num(hdev, type) == conn_num)
3036 break;
3037 }
3038
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003039 rcu_read_unlock();
3040
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003041 if (!chan)
3042 return NULL;
3043
3044 switch (chan->conn->type) {
3045 case ACL_LINK:
3046 cnt = hdev->acl_cnt;
3047 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003048 case AMP_LINK:
3049 cnt = hdev->block_cnt;
3050 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003051 case SCO_LINK:
3052 case ESCO_LINK:
3053 cnt = hdev->sco_cnt;
3054 break;
3055 case LE_LINK:
3056 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3057 break;
3058 default:
3059 cnt = 0;
3060 BT_ERR("Unknown link type");
3061 }
3062
3063 q = cnt / num;
3064 *quote = q ? q : 1;
3065 BT_DBG("chan %p quote %d", chan, *quote);
3066 return chan;
3067}
3068
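/* Channels that received no quota in the last scheduling round get the
 * priority of their queued data promoted so they are not starved.
 */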
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003069static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3070{
3071 struct hci_conn_hash *h = &hdev->conn_hash;
3072 struct hci_conn *conn;
3073 int num = 0;
3074
3075 BT_DBG("%s", hdev->name);
3076
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003077 rcu_read_lock();
3078
3079 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003080 struct hci_chan *chan;
3081
3082 if (conn->type != type)
3083 continue;
3084
3085 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3086 continue;
3087
3088 num++;
3089
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003090 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003091 struct sk_buff *skb;
3092
3093 if (chan->sent) {
3094 chan->sent = 0;
3095 continue;
3096 }
3097
3098 if (skb_queue_empty(&chan->data_q))
3099 continue;
3100
3101 skb = skb_peek(&chan->data_q);
3102 if (skb->priority >= HCI_PRIO_MAX - 1)
3103 continue;
3104
3105 skb->priority = HCI_PRIO_MAX - 1;
3106
3107 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003108 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003109 }
3110
3111 if (hci_conn_num(hdev, type) == num)
3112 break;
3113 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003114
3115 rcu_read_unlock();
3116
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003117}
3118
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003119static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3120{
3121 /* Calculate count of blocks used by this packet */
3122 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3123}
3124
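/* Detect a stalled link: no free controller buffers for longer than
 * HCI_ACL_TX_TIMEOUT triggers the link TX timeout handling.
 */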
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003125static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127 if (!test_bit(HCI_RAW, &hdev->flags)) {
3128 /* ACL tx timeout must be longer than maximum
3129 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003130 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003131 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003132 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003134}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135
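/* ACL scheduler for packet-based flow control */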
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003136static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003137{
3138 unsigned int cnt = hdev->acl_cnt;
3139 struct hci_chan *chan;
3140 struct sk_buff *skb;
3141 int quote;
3142
3143 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003144
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003145 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003146 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003147 u32 priority = (skb_peek(&chan->data_q))->priority;
3148 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003149 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003150 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003151
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003152 /* Stop if priority has changed */
3153 if (skb->priority < priority)
3154 break;
3155
3156 skb = skb_dequeue(&chan->data_q);
3157
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003158 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003159 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003160
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161 hci_send_frame(skb);
3162 hdev->acl_last_tx = jiffies;
3163
3164 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003165 chan->sent++;
3166 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167 }
3168 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003169
3170 if (cnt != hdev->acl_cnt)
3171 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172}
3173
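/* ACL scheduler for block-based flow control */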
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003174static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003175{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003176 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003177 struct hci_chan *chan;
3178 struct sk_buff *skb;
3179 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003180 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003181
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003182 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003183
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003184 BT_DBG("%s", hdev->name);
3185
3186 if (hdev->dev_type == HCI_AMP)
3187 type = AMP_LINK;
3188 else
3189 type = ACL_LINK;
3190
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003191 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003192 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003193 u32 priority = (skb_peek(&chan->data_q))->priority;
3194 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3195 int blocks;
3196
3197 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003198 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003199
3200 /* Stop if priority has changed */
3201 if (skb->priority < priority)
3202 break;
3203
3204 skb = skb_dequeue(&chan->data_q);
3205
3206 blocks = __get_blocks(hdev, skb);
3207 if (blocks > hdev->block_cnt)
3208 return;
3209
3210 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003211 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003212
3213 hci_send_frame(skb);
3214 hdev->acl_last_tx = jiffies;
3215
3216 hdev->block_cnt -= blocks;
3217 quote -= blocks;
3218
3219 chan->sent += blocks;
3220 chan->conn->sent += blocks;
3221 }
3222 }
3223
3224 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003225 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003226}
3227
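/* Schedule ACL data according to the controller's flow control mode */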
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003228static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003229{
3230 BT_DBG("%s", hdev->name);
3231
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003232 /* No ACL link over BR/EDR controller */
3233 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3234 return;
3235
3236 /* No AMP link over AMP controller */
3237 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003238 return;
3239
3240 switch (hdev->flow_ctl_mode) {
3241 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3242 hci_sched_acl_pkt(hdev);
3243 break;
3244
3245 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3246 hci_sched_acl_blk(hdev);
3247 break;
3248 }
3249}
3250
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003252static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253{
3254 struct hci_conn *conn;
3255 struct sk_buff *skb;
3256 int quote;
3257
3258 BT_DBG("%s", hdev->name);
3259
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003260 if (!hci_conn_num(hdev, SCO_LINK))
3261 return;
3262
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3264 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3265 BT_DBG("skb %p len %d", skb, skb->len);
3266 hci_send_frame(skb);
3267
3268 conn->sent++;
3269 if (conn->sent == ~0)
3270 conn->sent = 0;
3271 }
3272 }
3273}
3274
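/* Schedule eSCO */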
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003275static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003276{
3277 struct hci_conn *conn;
3278 struct sk_buff *skb;
3279 int quote;
3280
3281 BT_DBG("%s", hdev->name);
3282
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003283 if (!hci_conn_num(hdev, ESCO_LINK))
3284 return;
3285
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003286 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3287 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003288 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3289 BT_DBG("skb %p len %d", skb, skb->len);
3290 hci_send_frame(skb);
3291
3292 conn->sent++;
3293 if (conn->sent == ~0)
3294 conn->sent = 0;
3295 }
3296 }
3297}
3298
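/* Schedule LE data; controllers without a dedicated LE buffer pool
 * share the ACL buffers.
 */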
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003299static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003300{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003301 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003302 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003303 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003304
3305 BT_DBG("%s", hdev->name);
3306
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003307 if (!hci_conn_num(hdev, LE_LINK))
3308 return;
3309
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003310 if (!test_bit(HCI_RAW, &hdev->flags)) {
3311 /* LE tx timeout must be longer than maximum
3312 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003313 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003314 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003315 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003316 }
3317
3318 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003319 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003320 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003321 u32 priority = (skb_peek(&chan->data_q))->priority;
3322 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003323 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003324 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003325
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003326 /* Stop if priority has changed */
3327 if (skb->priority < priority)
3328 break;
3329
3330 skb = skb_dequeue(&chan->data_q);
3331
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003332 hci_send_frame(skb);
3333 hdev->le_last_tx = jiffies;
3334
3335 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003336 chan->sent++;
3337 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003338 }
3339 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003340
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003341 if (hdev->le_pkts)
3342 hdev->le_cnt = cnt;
3343 else
3344 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003345
3346 if (cnt != tmp)
3347 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003348}
3349
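/* TX work: drain the per-link schedulers, then send any queued raw
 * packets.
 */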
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003350static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003352 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353 struct sk_buff *skb;
3354
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003355 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003356 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357
Marcel Holtmann52de5992013-09-03 18:08:38 -07003358 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3359 /* Schedule queues and send stuff to HCI driver */
3360 hci_sched_acl(hdev);
3361 hci_sched_sco(hdev);
3362 hci_sched_esco(hdev);
3363 hci_sched_le(hdev);
3364 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003365
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366 /* Send next queued raw (unknown type) packet */
3367 while ((skb = skb_dequeue(&hdev->raw_q)))
3368 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369}
3370
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003371/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372
3373/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003374static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375{
3376 struct hci_acl_hdr *hdr = (void *) skb->data;
3377 struct hci_conn *conn;
3378 __u16 handle, flags;
3379
3380 skb_pull(skb, HCI_ACL_HDR_SIZE);
3381
3382 handle = __le16_to_cpu(hdr->handle);
3383 flags = hci_flags(handle);
3384 handle = hci_handle(handle);
3385
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003386 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003387 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388
3389 hdev->stat.acl_rx++;
3390
3391 hci_dev_lock(hdev);
3392 conn = hci_conn_hash_lookup_handle(hdev, handle);
3393 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003394
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003396 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003397
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003399 l2cap_recv_acldata(conn, skb, flags);
3400 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003402 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003403 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404 }
3405
3406 kfree_skb(skb);
3407}
3408
3409/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003410static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411{
3412 struct hci_sco_hdr *hdr = (void *) skb->data;
3413 struct hci_conn *conn;
3414 __u16 handle;
3415
3416 skb_pull(skb, HCI_SCO_HDR_SIZE);
3417
3418 handle = __le16_to_cpu(hdr->handle);
3419
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003420 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421
3422 hdev->stat.sco_rx++;
3423
3424 hci_dev_lock(hdev);
3425 conn = hci_conn_hash_lookup_handle(hdev, handle);
3426 hci_dev_unlock(hdev);
3427
3428 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003430 sco_recv_scodata(conn, skb);
3431 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003433 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003434 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435 }
3436
3437 kfree_skb(skb);
3438}
3439
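/* A request is complete once the command queue is empty or the next
 * queued command marks the start of a new request.
 */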
Johan Hedberg9238f362013-03-05 20:37:48 +02003440static bool hci_req_is_complete(struct hci_dev *hdev)
3441{
3442 struct sk_buff *skb;
3443
3444 skb = skb_peek(&hdev->cmd_q);
3445 if (!skb)
3446 return true;
3447
3448 return bt_cb(skb)->req.start;
3449}
3450
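/* Re-queue the last sent command so it is retransmitted. HCI_Reset is
 * never resent.
 */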
Johan Hedberg42c6b122013-03-05 20:37:49 +02003451static void hci_resend_last(struct hci_dev *hdev)
3452{
3453 struct hci_command_hdr *sent;
3454 struct sk_buff *skb;
3455 u16 opcode;
3456
3457 if (!hdev->sent_cmd)
3458 return;
3459
3460 sent = (void *) hdev->sent_cmd->data;
3461 opcode = __le16_to_cpu(sent->opcode);
3462 if (opcode == HCI_OP_RESET)
3463 return;
3464
3465 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3466 if (!skb)
3467 return;
3468
3469 skb_queue_head(&hdev->cmd_q, skb);
3470 queue_work(hdev->workqueue, &hdev->cmd_work);
3471}
3472
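/* Invoked on command status/complete events: once the request has
 * finished (or a command in it failed), remove its remaining queued
 * commands and run its completion callback.
 */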
Johan Hedberg9238f362013-03-05 20:37:48 +02003473void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3474{
3475 hci_req_complete_t req_complete = NULL;
3476 struct sk_buff *skb;
3477 unsigned long flags;
3478
3479 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3480
Johan Hedberg42c6b122013-03-05 20:37:49 +02003481 /* If the completed command doesn't match the last one that was
3482 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003483 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003484 if (!hci_sent_cmd_data(hdev, opcode)) {
 3485		/* Some CSR-based controllers generate a spontaneous
3486 * reset complete event during init and any pending
3487 * command will never be completed. In such a case we
3488 * need to resend whatever was the last sent
3489 * command.
3490 */
3491 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3492 hci_resend_last(hdev);
3493
Johan Hedberg9238f362013-03-05 20:37:48 +02003494 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003495 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003496
 3497	/* If the command succeeded and there are still more commands in
 3498	 * this request, the request is not yet complete.
3499 */
3500 if (!status && !hci_req_is_complete(hdev))
3501 return;
3502
 3503	/* If this was the last command in a request, the complete
3504 * callback would be found in hdev->sent_cmd instead of the
3505 * command queue (hdev->cmd_q).
3506 */
3507 if (hdev->sent_cmd) {
3508 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003509
3510 if (req_complete) {
3511 /* We must set the complete callback to NULL to
3512 * avoid calling the callback more than once if
3513 * this function gets called again.
3514 */
3515 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3516
Johan Hedberg9238f362013-03-05 20:37:48 +02003517 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003518 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003519 }
3520
3521 /* Remove all pending commands belonging to this request */
3522 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3523 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3524 if (bt_cb(skb)->req.start) {
3525 __skb_queue_head(&hdev->cmd_q, skb);
3526 break;
3527 }
3528
3529 req_complete = bt_cb(skb)->req.complete;
3530 kfree_skb(skb);
3531 }
3532 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3533
3534call_complete:
3535 if (req_complete)
3536 req_complete(hdev, status);
3537}
3538
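/* RX work: dispatch packets queued by the driver to the monitor, the
 * sockets and the event/data handlers.
 */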
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003539static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003541 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542 struct sk_buff *skb;
3543
3544 BT_DBG("%s", hdev->name);
3545
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003547 /* Send copy to monitor */
3548 hci_send_to_monitor(hdev, skb);
3549
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550 if (atomic_read(&hdev->promisc)) {
3551 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003552 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003553 }
3554
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003555 if (test_bit(HCI_RAW, &hdev->flags) ||
3556 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 kfree_skb(skb);
3558 continue;
3559 }
3560
3561 if (test_bit(HCI_INIT, &hdev->flags)) {
 3562			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003563 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564 case HCI_ACLDATA_PKT:
3565 case HCI_SCODATA_PKT:
3566 kfree_skb(skb);
3567 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003568 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569 }
3570
3571 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003572 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003574 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575 hci_event_packet(hdev, skb);
3576 break;
3577
3578 case HCI_ACLDATA_PKT:
3579 BT_DBG("%s ACL data packet", hdev->name);
3580 hci_acldata_packet(hdev, skb);
3581 break;
3582
3583 case HCI_SCODATA_PKT:
3584 BT_DBG("%s SCO data packet", hdev->name);
3585 hci_scodata_packet(hdev, skb);
3586 break;
3587
3588 default:
3589 kfree_skb(skb);
3590 break;
3591 }
3592 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593}
3594
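/* Command work: send the next queued command once the controller has
 * credit, arming the command timeout.
 */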
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003595static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003597 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 struct sk_buff *skb;
3599
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003600 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3601 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003604 if (atomic_read(&hdev->cmd_cnt)) {
3605 skb = skb_dequeue(&hdev->cmd_q);
3606 if (!skb)
3607 return;
3608
Wei Yongjun7585b972009-02-25 18:29:52 +08003609 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003611 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003612 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613 atomic_dec(&hdev->cmd_cnt);
3614 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003615 if (test_bit(HCI_RESET, &hdev->flags))
3616 del_timer(&hdev->cmd_timer);
3617 else
3618 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003619 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 } else {
3621 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003622 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 }
3624 }
3625}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003626
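/* Map a BDADDR_LE_* address type to the corresponding ADDR_LE_DEV_*
 * type, defaulting to random.
 */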
Andre Guedes31f79562012-04-24 21:02:53 -03003627u8 bdaddr_to_le(u8 bdaddr_type)
3628{
3629 switch (bdaddr_type) {
3630 case BDADDR_LE_PUBLIC:
3631 return ADDR_LE_DEV_PUBLIC;
3632
3633 default:
3634 /* Fallback to LE Random address type */
3635 return ADDR_LE_DEV_RANDOM;
3636 }
3637}