/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

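/* Take the last received event out of hdev->recv_evt and check that it
 * is the event the caller asked for (event == 0 means a Command
 * Complete for the given opcode). Returns the skb on success and
 * ERR_PTR(-ENODATA) otherwise; the caller owns the returned skb.
 */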
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

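/* Send a single HCI command and sleep until the controller answers with
 * the requested event (0 selects the Command Complete for this opcode)
 * or the timeout expires. The reply skb is returned to the caller.
 */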
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

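/* Pick the richest inquiry mode the controller supports: 0x02 for
 * inquiry with extended results, 0x01 for inquiry with RSSI, 0x00 for
 * standard inquiry. The manufacturer/revision checks below cover
 * controllers known to handle RSSI results despite not advertising the
 * feature bit.
 */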
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

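/* Build the event mask from the controller's feature bits: start from
 * the default mask and enable only the events this controller can
 * actually generate, then program the LE event mask as well if LE is
 * supported.
 */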
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Only send HCI_Delete_Stored_Link_Key if it is supported */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

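/* Controller initialization is staged: init1 resets the device and
 * reads its basic capabilities, init2 configures BR/EDR and LE based on
 * those capabilities, and init3 applies settings that depend on the
 * supported commands and features. AMP controllers only run the first
 * stage.
 */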
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

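/* Re-insert the entry so the resolve list stays ordered by signal
 * strength (smallest |RSSI| first); entries whose name resolution is
 * already in progress are never displaced.
 */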
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

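/* wait_on_bit() action: sleep until the bit is cleared, waking early if
 * a signal is pending.
 */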
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

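/* Assemble the LE advertising payload into ptr: an optional flags
 * field, the advertising TX power when known, and as much of the local
 * name as still fits. Returns the number of bytes written.
 */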
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

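/* Power on a device: call the driver's open and setup callbacks, then
 * run the staged HCI init sequence unless the device is in raw mode.
 * If initialization fails, everything is flushed and the driver is
 * closed again.
 */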
1111int hci_dev_open(__u16 dev)
1112{
1113 struct hci_dev *hdev;
1114 int ret = 0;
1115
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001116 hdev = hci_dev_get(dev);
1117 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118 return -ENODEV;
1119
1120 BT_DBG("%s %p", hdev->name, hdev);
1121
1122 hci_req_lock(hdev);
1123
Johan Hovold94324962012-03-15 14:48:41 +01001124 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1125 ret = -ENODEV;
1126 goto done;
1127 }
1128
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001129 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1130 ret = -ERFKILL;
1131 goto done;
1132 }
1133
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134 if (test_bit(HCI_UP, &hdev->flags)) {
1135 ret = -EALREADY;
1136 goto done;
1137 }
1138
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139 if (hdev->open(hdev)) {
1140 ret = -EIO;
1141 goto done;
1142 }
1143
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001144 atomic_set(&hdev->cmd_cnt, 1);
1145 set_bit(HCI_INIT, &hdev->flags);
1146
1147 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1148 ret = hdev->setup(hdev);
1149
1150 if (!ret) {
1151 /* Treat all non BR/EDR controllers as raw devices if
1152 * enable_hs is not set.
1153 */
1154 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1155 set_bit(HCI_RAW, &hdev->flags);
1156
1157 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1158 set_bit(HCI_RAW, &hdev->flags);
1159
1160 if (!test_bit(HCI_RAW, &hdev->flags))
1161 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 }
1163
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001164 clear_bit(HCI_INIT, &hdev->flags);
1165
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166 if (!ret) {
1167 hci_dev_hold(hdev);
1168 set_bit(HCI_UP, &hdev->flags);
1169 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001170 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1171 mgmt_valid_hdev(hdev)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001172 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001173 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001174 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001175 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001176 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001178 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001179 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001180 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181
1182 skb_queue_purge(&hdev->cmd_q);
1183 skb_queue_purge(&hdev->rx_q);
1184
1185 if (hdev->flush)
1186 hdev->flush(hdev);
1187
1188 if (hdev->sent_cmd) {
1189 kfree_skb(hdev->sent_cmd);
1190 hdev->sent_cmd = NULL;
1191 }
1192
1193 hdev->close(hdev);
1194 hdev->flags = 0;
1195 }
1196
1197done:
1198 hci_req_unlock(hdev);
1199 hci_dev_put(hdev);
1200 return ret;
1201}
1202
1203static int hci_dev_do_close(struct hci_dev *hdev)
1204{
1205 BT_DBG("%s %p", hdev->name, hdev);
1206
Andre Guedes28b75a82012-02-03 17:48:00 -03001207 cancel_work_sync(&hdev->le_scan);
1208
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001209 cancel_delayed_work(&hdev->power_off);
1210
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 hci_req_cancel(hdev, ENODEV);
1212 hci_req_lock(hdev);
1213
1214 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001215 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 hci_req_unlock(hdev);
1217 return 0;
1218 }
1219
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001220 /* Flush RX and TX works */
1221 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001222 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001224 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001225 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001226 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001227 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001228 }
1229
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001230 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001231 cancel_delayed_work(&hdev->service_cache);
1232
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001233 cancel_delayed_work_sync(&hdev->le_scan_disable);
1234
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001235 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 inquiry_cache_flush(hdev);
1237 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001238 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239
1240 hci_notify(hdev, HCI_DEV_DOWN);
1241
1242 if (hdev->flush)
1243 hdev->flush(hdev);
1244
1245 /* Reset device */
1246 skb_queue_purge(&hdev->cmd_q);
1247 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001248 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001249 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001251 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 clear_bit(HCI_INIT, &hdev->flags);
1253 }
1254
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001255 /* flush cmd work */
1256 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257
1258 /* Drop queues */
1259 skb_queue_purge(&hdev->rx_q);
1260 skb_queue_purge(&hdev->cmd_q);
1261 skb_queue_purge(&hdev->raw_q);
1262
1263 /* Drop last sent command */
1264 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001265 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 kfree_skb(hdev->sent_cmd);
1267 hdev->sent_cmd = NULL;
1268 }
1269
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001270 kfree_skb(hdev->recv_evt);
1271 hdev->recv_evt = NULL;
1272
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273 /* After this point our queues are empty
1274 * and no tasks are scheduled. */
1275 hdev->close(hdev);
1276
Johan Hedberg35b973c2013-03-15 17:06:59 -05001277 /* Clear flags */
1278 hdev->flags = 0;
1279 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1280
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001281 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1282 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001283 hci_dev_lock(hdev);
1284 mgmt_powered(hdev, 0);
1285 hci_dev_unlock(hdev);
1286 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001287
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001288 /* Controller radio is available but is currently powered down */
1289 hdev->amp_status = 0;
1290
Johan Hedberge59fda82012-02-22 18:11:53 +02001291 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001292 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001293
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 hci_req_unlock(hdev);
1295
1296 hci_dev_put(hdev);
1297 return 0;
1298}
1299
1300int hci_dev_close(__u16 dev)
1301{
1302 struct hci_dev *hdev;
1303 int err;
1304
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001305 hdev = hci_dev_get(dev);
1306 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001308
1309 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1310 cancel_delayed_work(&hdev->power_off);
1311
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001313
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 hci_dev_put(hdev);
1315 return err;
1316}
1317
1318int hci_dev_reset(__u16 dev)
1319{
1320 struct hci_dev *hdev;
1321 int ret = 0;
1322
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001323 hdev = hci_dev_get(dev);
1324 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 return -ENODEV;
1326
1327 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328
1329 if (!test_bit(HCI_UP, &hdev->flags))
1330 goto done;
1331
1332 /* Drop queues */
1333 skb_queue_purge(&hdev->rx_q);
1334 skb_queue_purge(&hdev->cmd_q);
1335
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001336 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 inquiry_cache_flush(hdev);
1338 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001339 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340
1341 if (hdev->flush)
1342 hdev->flush(hdev);
1343
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001344 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001345 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
1347 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001348 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349
1350done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 hci_req_unlock(hdev);
1352 hci_dev_put(hdev);
1353 return ret;
1354}
1355
1356int hci_dev_reset_stat(__u16 dev)
1357{
1358 struct hci_dev *hdev;
1359 int ret = 0;
1360
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001361 hdev = hci_dev_get(dev);
1362 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 return -ENODEV;
1364
1365 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1366
1367 hci_dev_put(hdev);
1368
1369 return ret;
1370}
1371
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

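/* Copy the list of registered controllers to userspace. The caller's
 * requested count is capped so the temporary buffer never exceeds two
 * pages.
 */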
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

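/* Fill in struct hci_dev_info for one controller. For controllers
 * without BR/EDR support the LE buffer settings are reported through
 * the ACL fields, as the structure has no dedicated LE members.
 */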
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

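/* Work item that powers on a controller. For devices in auto-off mode
 * a delayed power-off is scheduled, so a controller that stays unused
 * is switched back off after HCI_AUTO_OFF_TIMEOUT.
 */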
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

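/* Helpers that free the per-device UUID, link key and long term key
 * lists; hci_unregister_dev() calls them under hci_dev_lock().
 */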
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

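/* Decide whether a link key is stored persistently. Legacy keys are
 * always kept and debug keys never are; otherwise the decision is based
 * on the exchanged authentication requirements: the key is kept when
 * neither side requested no-bonding, or when either side required
 * dedicated bonding.
 */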
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

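/* Add or update a stored link key. Buggy controllers may report a
 * changed combination key for legacy pairing even without a previous
 * key; such keys are downgraded to plain combination keys before being
 * stored. New keys are reported to the management interface.
 */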
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

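/* Add or update an SMP key. Only STK and LTK types are accepted, and
 * only long term keys (not short term keys) are reported to the
 * management interface.
 */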
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

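/* Per-device blacklist of remote addresses. Additions and removals are
 * reported to the management interface as device blocked/unblocked
 * events; deleting BDADDR_ANY clears the whole list.
 */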
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

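/* Start an LE scan: two synchronous requests set the scan parameters
 * and enable scanning, then delayed work is queued to disable the scan
 * again when the requested timeout expires.
 */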
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

2343
Marcel Holtmann76bca882009-11-18 00:40:39 +01002344/* Receive frame from HCI drivers */
2345int hci_recv_frame(struct sk_buff *skb)
2346{
2347 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2348 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002349 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002350 kfree_skb(skb);
2351 return -ENXIO;
2352 }
2353
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002354 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002355 bt_cb(skb)->incoming = 1;
2356
2357 /* Time stamp */
2358 __net_timestamp(skb);
2359
Marcel Holtmann76bca882009-11-18 00:40:39 +01002360 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002361 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002362
Marcel Holtmann76bca882009-11-18 00:40:39 +01002363 return 0;
2364}
2365EXPORT_SYMBOL(hci_recv_frame);
2366
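/* Reassemble a fragmented HCI frame of the given packet type. Bytes
 * accumulate in hdev->reassembly[index] until the expected header and
 * payload lengths have arrived; a completed frame is handed on to
 * hci_recv_frame(). Returns the number of input bytes not consumed, or
 * a negative error.
 */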
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

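/* Reassemble HCI frames from a raw byte stream. The first byte of each
 * frame carries the packet type; a single reassembly slot
 * (STREAM_REASSEMBLY) is used for the whole stream.
 */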
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

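/* Hand one frame to the driver. A timestamped copy goes to the monitor
 * interface, and to the HCI sockets while promiscuous listeners are
 * present (hdev->promisc).
 */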
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

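/* Asynchronous HCI request framework. A request gathers one or more
 * commands in a private queue; hci_req_run() tags the last command with
 * the completion callback and splices the whole batch onto the device
 * command queue in a single operation.
 */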
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

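/* Allocate an skb containing a single HCI command packet: the command
 * header (opcode and parameter length) followed by the parameters.
 */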
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

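/* Queue an ACL frame for transmission. A non-fragmented skb is queued
 * as-is; for a fragmented skb the head keeps the caller's ACL_START
 * flags while the fragments on the frag_list are queued atomically
 * with ACL_CONT set.
 */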
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002743static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002744 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002746 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 struct hci_dev *hdev = conn->hdev;
2748 struct sk_buff *list;
2749
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002750 skb->len = skb_headlen(skb);
2751 skb->data_len = 0;
2752
2753 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002754
2755 switch (hdev->dev_type) {
2756 case HCI_BREDR:
2757 hci_add_acl_hdr(skb, conn->handle, flags);
2758 break;
2759 case HCI_AMP:
2760 hci_add_acl_hdr(skb, chan->handle, flags);
2761 break;
2762 default:
2763 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2764 return;
2765 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002766
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002767 list = skb_shinfo(skb)->frag_list;
2768 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 /* Non fragmented */
2770 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2771
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002772 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 } else {
2774 /* Fragmented */
2775 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2776
2777 skb_shinfo(skb)->frag_list = NULL;
2778
2779 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002780 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002782 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002783
2784 flags &= ~ACL_START;
2785 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786 do {
2787 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002788
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002790 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002791 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792
2793 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2794
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002795 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 } while (list);
2797
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002798 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002800}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
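
/*
 * Illustrative sketch (not part of the original file): a caller such as
 * L2CAP reserves headroom for the ACL header in the first skb and chains
 * continuation fragments on skb_shinfo(skb)->frag_list before handing the
 * chain to hci_send_acl(). The helper below is hypothetical and only shows
 * the shape of that pattern.
 */
#if 0
static int example_send_fragmented(struct hci_chan *chan, const u8 *data,
				   size_t len, size_t mtu)
{
	struct sk_buff *skb, **frag;
	size_t count = min(len, mtu);

	skb = bt_skb_alloc(count + HCI_ACL_HDR_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Headroom for the header hci_add_acl_hdr() will push */
	skb_reserve(skb, HCI_ACL_HDR_SIZE);
	memcpy(skb_put(skb, count), data, count);
	data += count;
	len -= count;

	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min(len, mtu);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag) {
			kfree_skb(skb);
			return -ENOMEM;
		}

		memcpy(skb_put(*frag, count), data, count);
		data += count;
		len -= count;

		frag = &(*frag)->next;
	}

	hci_send_acl(chan, skb, ACL_START);
	return 0;
}
#endif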

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
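
/*
 * Illustrative sketch (not part of the original file): roughly how a SCO
 * socket layer might hand an audio payload to hci_send_sco(). Buffer
 * contents and sizes are hypothetical.
 */
#if 0
static int example_send_sco(struct hci_conn *conn, const u8 *audio, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len + HCI_SCO_HDR_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Leave room for the header hci_send_sco() pushes */
	skb_reserve(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb_put(skb, len), audio, len);

	hci_send_sco(conn, skb);
	return 0;
}
#endif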

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
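
/*
 * Worked example (not from the original source): the quota computed above
 * splits the controller's free buffer credits evenly over the busy
 * connections of this link type, always granting at least one packet so the
 * least-used link is never starved. With acl_cnt == 8 credits and num == 3
 * busy ACL connections, a scheduling pass may send 8 / 3 == 2 packets on
 * the connection with the smallest c->sent; with 8 busy connections the
 * integer division yields 0 and the "q ? q : 1" fallback still allows 1.
 */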

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
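
/*
 * Worked example (not from the original source): if one channel's head skb
 * has priority 5 and another's has priority 6, the scan above resets
 * num/min when it meets the higher priority, so only priority-6 channels
 * compete on the "least sent" metric. Lower-priority traffic waits until
 * hci_prio_recalculate() below promotes its head skb toward
 * HCI_PRIO_MAX - 1 after it has been skipped for a round.
 */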

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
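
/*
 * Worked example (not from the original source): assuming a hypothetical
 * block_len of 339 bytes, a 1000-byte frame (996 bytes of payload after the
 * 4-byte ACL header) occupies DIV_ROUND_UP(996, 339) == 3 buffer blocks on
 * a block-based (AMP) controller.
 */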

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
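
/*
 * Illustrative sketch (not part of the original file): roughly how a caller
 * builds a multi-command request whose completion funnels through
 * hci_req_cmd_complete() above. The callback and the choice of commands
 * here are hypothetical; the framework marks the first queued command with
 * bt_cb(skb)->req.start, which is what hci_req_is_complete() tests.
 */
#if 0
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__le16 policy = cpu_to_le16(0x0005);

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(policy),
		    &policy);
	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}
#endif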

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
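
/*
 * Illustrative sketch (not part of the original file): commands enter the
 * queue drained above via hci_send_cmd(), which queues the skb on
 * hdev->cmd_q and schedules hdev->cmd_work; cmd_cnt is replenished when the
 * controller acknowledges the outstanding command. The opcode and parameter
 * below are examples only.
 */
#if 0
static int example_queue_command(struct hci_dev *hdev)
{
	__u8 enable = 0x01;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(enable),
			    &enable);
}
#endif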

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
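
/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * the management interface might start a bounded inquiry. The length 0x08
 * (in units of 1.28 s, i.e. ~10.24 s) mirrors the value mgmt uses for
 * BR/EDR discovery, but is only an example here.
 */
#if 0
static int example_start_discovery(struct hci_dev *hdev)
{
	int err;

	err = hci_do_inquiry(hdev, 0x08);
	if (err == -EINPROGRESS)
		BT_DBG("%s inquiry already running", hdev->name);

	return err;
}
#endif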

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fall back to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}