blob: 750c360f96db6ef0aa92a98a2989a8d42e9a066d [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
Sasha Levin3df92b32012-05-27 22:36:56 +020048/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Forward a device state event (register/unregister/up/down) to the
 * HCI socket layer so listening sockets are notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Completion callback for synchronous HCI requests.
 *
 * Stores the controller's status code and wakes the thread sleeping on
 * hdev->req_wait_q. Only acts while a request is actually pending so
 * that late or stray completions are ignored.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
70
/* Abort a pending synchronous request with @err (a negative errno is
 * expected by the waiters, which negate req_result again). Wakes any
 * thread sleeping on hdev->req_wait_q.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
81
/* Fetch the last received event skb (stashed in hdev->recv_evt by the
 * event processing path) and check that it is the reply the caller was
 * waiting for.
 *
 * If @event is non-zero the skb is returned only when its event code
 * matches @event. Otherwise the event must be a Command Complete for
 * @opcode.
 *
 * On success the skb is returned with the consumed headers pulled and
 * ownership transferred to the caller. On any mismatch the skb is
 * freed and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Take ownership of the stashed event under the device lock so
	 * it cannot be replaced or freed underneath us.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
/* Send a single HCI command and sleep until its reply arrives.
 *
 * @event: if non-zero, wait for this specific event code instead of
 *         Command Complete (for commands answered by a different event).
 * @timeout: maximum wait, in jiffies.
 *
 * Returns the reply skb (ownership passes to the caller) or an ERR_PTR:
 * -EINTR on signal, -ETIMEDOUT when no reply arrived, or the translated
 * controller status. Must be called from process context.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Mark the request pending before running it so the completion
	 * callback (hci_req_sync_complete) is allowed to record a result.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* Woken by a signal rather than by request completion */
	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case
 * of waiting for a Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * @func builds the request (a sequence of HCI commands) into @req; the
 * caller-provided @opt is passed through to it. Sleeps up to @timeout
 * jiffies for the whole request to finish.
 *
 * Returns 0 on success, -EINTR on signal, -ETIMEDOUT when the
 * controller did not answer, or the translated controller status.
 * Caller must hold the request lock (see hci_req_sync()).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* Woken by a signal rather than by request completion */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: no completion arrived in time */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
/* Public entry point for synchronous requests: rejects requests while
 * the device is down and serializes all requests via the request lock
 * before delegating to __hci_req_sync().
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
279
/* Queue an HCI_Reset command. The HCI_RESET flag is set first so the
 * rest of the stack knows a reset is in flight.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
288
/* Stage-one init for BR/EDR (and dual-mode) controllers: packet-based
 * flow control plus the basic identity reads every controller supports.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
302
/* Stage-one init for AMP controllers: these use block-based flow
 * control and report capabilities via the AMP Info command.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
316
Johan Hedberg42c6b122013-03-05 20:37:49 +0200317static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200318{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200319 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200320
321 BT_DBG("%s %ld", hdev->name, opt);
322
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300323 /* Reset */
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200325 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300326
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200327 switch (hdev->dev_type) {
328 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200329 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200330 break;
331
332 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200333 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200334 break;
335
336 default:
337 BT_ERR("Unknown device type %d", hdev->dev_type);
338 break;
339 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200340}
341
/* Stage-two setup of the BR/EDR side: buffer sizes, identity strings,
 * event filters, and the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters; these commands only exist from
	 * Bluetooth 1.2 onwards.
	 */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
373
/* Stage-two setup of the LE side: read LE capabilities and, for
 * single-mode LE controllers, flag LE as enabled.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
397
398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399{
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
402
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
405
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
409
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
417 }
418
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
422
423 return 0x00;
424}
425
Johan Hedberg42c6b122013-03-05 20:37:49 +0200426static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200427{
428 u8 mode;
429
Johan Hedberg42c6b122013-03-05 20:37:49 +0200430 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200431
Johan Hedberg42c6b122013-03-05 20:37:49 +0200432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200433}
434
/* Build and send the HCI event masks (page 1 and, for LE controllers,
 * the LE event mask). The mask starts from a safe baseline and bits
 * are enabled only for events the controller's features can generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* LE event mask: enable the first five LE meta events */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
515
/* Second stage of controller initialization: per-transport setup,
 * event masks, and feature-conditional configuration commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP capable but disabled: clear any stale EIR data */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
569
Johan Hedberg42c6b122013-03-05 20:37:49 +0200570static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200571{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200572 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200573 struct hci_cp_write_def_link_policy cp;
574 u16 link_policy = 0;
575
576 if (lmp_rswitch_capable(hdev))
577 link_policy |= HCI_LP_RSWITCH;
578 if (lmp_hold_capable(hdev))
579 link_policy |= HCI_LP_HOLD;
580 if (lmp_sniff_capable(hdev))
581 link_policy |= HCI_LP_SNIFF;
582 if (lmp_park_capable(hdev))
583 link_policy |= HCI_LP_PARK;
584
585 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200586 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200587}
588
/* Synchronize the controller's LE host support setting with the
 * HCI_LE_ENABLED flag. The command is only sent when the controller's
 * current state differs from the desired one.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
609
/* Build and send event mask page 2, enabling the Connectionless Slave
 * Broadcast events matching the roles the controller supports
 * (features page 2, byte 0: bit 0 = master, bit 1 = slave).
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
637
/* Third stage of controller initialization: commands that depend on the
 * supported-commands bitmask and extended feature pages read earlier.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy is supported (commands byte 5 bit 4) */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
678
/* Fourth stage of controller initialization: features that depend on
 * the extended feature pages read in stage three.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
691
/* Run the staged controller bring-up: stage one for all controllers,
 * stages two to four only for BR/EDR/LE type controllers.
 *
 * Returns 0 on success or a negative errno from the first failing
 * stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
717
/* Request builder: write the scan enable setting (inquiry/page scan
 * bits) passed in @opt.
 */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
727
/* Request builder: write the authentication enable setting from @opt. */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
737
/* Request builder: write the encryption mode setting from @opt. */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
747
/* Request builder: write the default link policy from @opt (converted
 * to little-endian wire format).
 */
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
757
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900758/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759 * Device is held on return. */
760struct hci_dev *hci_dev_get(int index)
761{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200762 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763
764 BT_DBG("%d", index);
765
766 if (index < 0)
767 return NULL;
768
769 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200770 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 if (d->id == index) {
772 hdev = hci_dev_hold(d);
773 break;
774 }
775 }
776 read_unlock(&hci_dev_list_lock);
777 return hdev;
778}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779
780/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200781
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200782bool hci_discovery_active(struct hci_dev *hdev)
783{
784 struct discovery_state *discov = &hdev->discovery;
785
Andre Guedes6fbe1952012-02-03 17:47:58 -0300786 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300787 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300788 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200789 return true;
790
Andre Guedes6fbe1952012-02-03 17:47:58 -0300791 default:
792 return false;
793 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200794}
795
Johan Hedbergff9ef572012-01-04 14:23:45 +0200796void hci_discovery_set_state(struct hci_dev *hdev, int state)
797{
798 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
799
800 if (hdev->discovery.state == state)
801 return;
802
803 switch (state) {
804 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300805 if (hdev->discovery.state != DISCOVERY_STARTING)
806 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200807 break;
808 case DISCOVERY_STARTING:
809 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300810 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200811 mgmt_discovering(hdev, 1);
812 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200813 case DISCOVERY_RESOLVING:
814 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200815 case DISCOVERY_STOPPING:
816 break;
817 }
818
819 hdev->discovery.state = state;
820}
821
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300822void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700823{
Johan Hedberg30883512012-01-04 14:16:21 +0200824 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200825 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700826
Johan Hedberg561aafb2012-01-04 13:31:59 +0200827 list_for_each_entry_safe(p, n, &cache->all, all) {
828 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200829 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200831
832 INIT_LIST_HEAD(&cache->unknown);
833 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834}
835
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300836struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
837 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838{
Johan Hedberg30883512012-01-04 14:16:21 +0200839 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840 struct inquiry_entry *e;
841
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300842 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843
Johan Hedberg561aafb2012-01-04 13:31:59 +0200844 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200846 return e;
847 }
848
849 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700850}
851
Johan Hedberg561aafb2012-01-04 13:31:59 +0200852struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300853 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200854{
Johan Hedberg30883512012-01-04 14:16:21 +0200855 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200856 struct inquiry_entry *e;
857
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300858 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200859
860 list_for_each_entry(e, &cache->unknown, list) {
861 if (!bacmp(&e->data.bdaddr, bdaddr))
862 return e;
863 }
864
865 return NULL;
866}
867
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200868struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300869 bdaddr_t *bdaddr,
870 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200871{
872 struct discovery_state *cache = &hdev->discovery;
873 struct inquiry_entry *e;
874
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300875 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200876
877 list_for_each_entry(e, &cache->resolve, list) {
878 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
879 return e;
880 if (!bacmp(&e->data.bdaddr, bdaddr))
881 return e;
882 }
883
884 return NULL;
885}
886
Johan Hedberga3d4e202012-01-09 00:53:02 +0200887void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300888 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200889{
890 struct discovery_state *cache = &hdev->discovery;
891 struct list_head *pos = &cache->resolve;
892 struct inquiry_entry *p;
893
894 list_del(&ie->list);
895
896 list_for_each_entry(p, &cache->resolve, list) {
897 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300898 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200899 break;
900 pos = &p->list;
901 }
902
903 list_add(&ie->list, pos);
904}
905
/* Add or refresh an inquiry cache entry for the device described by
 * @data. @name_known tells whether the remote name is already known;
 * *@ssp (if non-NULL) is set to whether the device supports Simple
 * Pairing. Returns true if no further name resolution is needed for
 * this entry, false if the name is still unknown (or allocation of a
 * new entry failed).
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Remember SSP support seen in an earlier response even if
		 * this one doesn't advertise it */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed for an entry queued for name resolution:
		 * re-sort it so stronger devices are resolved first */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		/* Queue on the unknown list so the name gets resolved */
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: take the entry off the unknown/resolve
	 * list it was queued on (not touched while NAME_PENDING) */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
963
964static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
965{
Johan Hedberg30883512012-01-04 14:16:21 +0200966 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967 struct inquiry_info *info = (struct inquiry_info *) buf;
968 struct inquiry_entry *e;
969 int copied = 0;
970
Johan Hedberg561aafb2012-01-04 13:31:59 +0200971 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200973
974 if (copied >= num)
975 break;
976
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977 bacpy(&info->bdaddr, &data->bdaddr);
978 info->pscan_rep_mode = data->pscan_rep_mode;
979 info->pscan_period_mode = data->pscan_period_mode;
980 info->pscan_mode = data->pscan_mode;
981 memcpy(info->dev_class, data->dev_class, 3);
982 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200983
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200985 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 }
987
988 BT_DBG("cache %p, copied %d", cache, copied);
989 return copied;
990}
991
Johan Hedberg42c6b122013-03-05 20:37:49 +0200992static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993{
994 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200995 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996 struct hci_cp_inquiry cp;
997
998 BT_DBG("%s", hdev->name);
999
1000 if (test_bit(HCI_INQUIRY, &hdev->flags))
1001 return;
1002
1003 /* Start Inquiry */
1004 memcpy(&cp.lap, &ir->lap, 3);
1005 cp.length = ir->length;
1006 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001007 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008}
1009
/* Bit-wait action for wait_on_bit() on the HCI_INQUIRY flag: sleep
 * until woken, then report whether a signal interrupted the wait
 * (non-zero makes wait_on_bit() give up).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1015
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016int hci_inquiry(void __user *arg)
1017{
1018 __u8 __user *ptr = arg;
1019 struct hci_inquiry_req ir;
1020 struct hci_dev *hdev;
1021 int err = 0, do_inquiry = 0, max_rsp;
1022 long timeo;
1023 __u8 *buf;
1024
1025 if (copy_from_user(&ir, ptr, sizeof(ir)))
1026 return -EFAULT;
1027
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001028 hdev = hci_dev_get(ir.dev_id);
1029 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030 return -ENODEV;
1031
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001032 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1033 err = -EBUSY;
1034 goto done;
1035 }
1036
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001037 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001038 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001039 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001040 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041 do_inquiry = 1;
1042 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001043 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044
Marcel Holtmann04837f62006-07-03 10:02:33 +02001045 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001046
1047 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001048 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1049 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001050 if (err < 0)
1051 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001052
1053 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1054 * cleared). If it is interrupted by a signal, return -EINTR.
1055 */
1056 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1057 TASK_INTERRUPTIBLE))
1058 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001059 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001061 /* for unlimited number of responses we will use buffer with
1062 * 255 entries
1063 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001064 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1065
1066 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1067 * copy it to the user space.
1068 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001069 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001070 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 err = -ENOMEM;
1072 goto done;
1073 }
1074
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001075 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001077 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078
1079 BT_DBG("num_rsp %d", ir.num_rsp);
1080
1081 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1082 ptr += sizeof(ir);
1083 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001084 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001086 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087 err = -EFAULT;
1088
1089 kfree(buf);
1090
1091done:
1092 hci_dev_put(hdev);
1093 return err;
1094}
1095
/* Build the LE advertising data payload into @ptr: an optional Flags
 * AD element, an optional TX Power element, then the local name
 * (shortened to fit HCI_MAX_AD_LENGTH if necessary). Returns the
 * total number of bytes written.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	/* Each AD element is: length byte, type byte, payload */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Space left for the name payload after its 2 header bytes */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* Truncated names are tagged as "shortened" per EIR rules */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
1153
/* Regenerate the LE advertising data and, if it changed since the
 * last update, queue an LE Set Advertising Data command on @req.
 * No-op for controllers without LE support.
 */
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Skip the command if the payload is unchanged */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for the next comparison */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1178
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179/* ---- HCI ioctl helpers ---- */
1180
/* Power on the HCI device with index @dev: open the transport, run
 * vendor setup and the HCI init sequence, and notify listeners.
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init-sequence error).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Check for rfkill but allow the HCI setup stage to proceed
	 * (which in itself doesn't cause any RF activity).
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (driver callback) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup, only during the initial setup stage */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw and user-channel devices skip the HCI init sequence */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1272
/* Power down @hdev: cancel pending work, flush queues and caches,
 * optionally reset the controller, close the transport, and clear
 * runtime state. Always returns 0. The teardown order matters (works
 * flushed before queues purged, reset before cmd work flushed).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb() handles NULL, so no guard is needed here */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device came up */
	hci_dev_put(hdev);
	return 0;
}
1367
/* HCIDEVDOWN ioctl handler: power down the device with index @dev.
 * Refused with -EBUSY while the device is bound to a user channel.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* An explicit close cancels any pending auto-power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1391
/* HCIDEVRESET ioctl handler: flush queues, caches and connections of
 * the running device with index @dev, then (for non-raw devices)
 * issue an HCI Reset. Returns 0 or a negative errno (-ENODEV,
 * -ENETDOWN if the device is not up, -EBUSY for user channel).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters before talking to the controller */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1436
1437int hci_dev_reset_stat(__u16 dev)
1438{
1439 struct hci_dev *hdev;
1440 int ret = 0;
1441
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001442 hdev = hci_dev_get(dev);
1443 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 return -ENODEV;
1445
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001446 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1447 ret = -EBUSY;
1448 goto done;
1449 }
1450
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1452
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001453done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 return ret;
1456}
1457
/* Dispatcher for the HCISET* device-configuration ioctls. Copies a
 * struct hci_dev_req from userspace, resolves the device, and either
 * runs a synchronous HCI request (auth/encrypt/scan/link policy) or
 * updates local settings directly (link mode, packet type, MTUs).
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Devices claimed by a user channel are not configurable here */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16 values:
	 * low half = packet count, high half = MTU */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1538
/* HCIGETDEVLIST ioctl handler: copy the id and flags of up to the
 * requested number of registered devices back to userspace. As a
 * side effect, touching a device through this call cancels its
 * pending auto-power-off and marks non-mgmt devices pairable.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to a sane size */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Report the actual number of devices and copy only that much */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1585
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * controller id supplied by userspace in @arg.
 * Returns 0 on success, -EFAULT on copy failure, -ENODEV if the id
 * does not name a registered controller.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as userspace interest: abort any
	 * pending auto-power-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Raw (non-mgmt) users get the legacy pairable default. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble and device type in bits 4-5. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffer info in the ACL
		 * fields, no SCO support. */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1634
1635/* ---- Interface to HCI drivers ---- */
1636
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001637static int hci_rfkill_set_block(void *data, bool blocked)
1638{
1639 struct hci_dev *hdev = data;
1640
1641 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1642
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001643 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1644 return -EBUSY;
1645
Johan Hedberg5e130362013-09-13 08:58:17 +03001646 if (blocked) {
1647 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001648 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1649 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001650 } else {
1651 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001652 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001653
1654 return 0;
1655}
1656
/* rfkill operations registered for every HCI controller; only
 * soft-block notifications are handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1660
/* Deferred power-on worker (hdev->power_on).  Opens the device and,
 * depending on rfkill state and the auto-off policy, either closes it
 * again or schedules the auto-power-off timer.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		/* Let mgmt report the failed Set Powered operation. */
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
		/* rfkill-blocked: undo the open and drop the auto-off
		 * policy so the device stays down. */
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power down again automatically unless userspace claims
		 * the device within the timeout. */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	/* First successful power-on completes setup; announce the new
	 * controller index to mgmt. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1685
/* Deferred power-off worker (hdev->power_off, delayed): simply closes
 * the device.  Scheduled by the auto-off policy and by mgmt. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1695
/* Deferred worker ending a time-limited discoverable period: turn off
 * inquiry scan (keep page scan) and reset the discoverable timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1713
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001714int hci_uuids_clear(struct hci_dev *hdev)
1715{
Johan Hedberg48210022013-01-27 00:31:28 +02001716 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001717
Johan Hedberg48210022013-01-27 00:31:28 +02001718 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1719 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001720 kfree(uuid);
1721 }
1722
1723 return 0;
1724}
1725
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001726int hci_link_keys_clear(struct hci_dev *hdev)
1727{
1728 struct list_head *p, *n;
1729
1730 list_for_each_safe(p, n, &hdev->link_keys) {
1731 struct link_key *key;
1732
1733 key = list_entry(p, struct link_key, list);
1734
1735 list_del(p);
1736 kfree(key);
1737 }
1738
1739 return 0;
1740}
1741
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001742int hci_smp_ltks_clear(struct hci_dev *hdev)
1743{
1744 struct smp_ltk *k, *tmp;
1745
1746 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1747 list_del(&k->list);
1748 kfree(k);
1749 }
1750
1751 return 0;
1752}
1753
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001754struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1755{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001756 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001757
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001758 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001759 if (bacmp(bdaddr, &k->bdaddr) == 0)
1760 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001761
1762 return NULL;
1763}
1764
/* Decide whether a new link key should be stored persistently, based
 * on the key type and the bonding requirements (auth_type values)
 * negotiated by both sides.  The checks are ordered: earlier rules
 * take precedence over later ones.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key (pre-SSP pairing): always keep */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case: no connection context available, so
	 * assume the key should be kept */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1800
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001801struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001802{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001803 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001804
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001805 list_for_each_entry(k, &hdev->long_term_keys, list) {
1806 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001807 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001808 continue;
1809
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001810 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001811 }
1812
1813 return NULL;
1814}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001815
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001816struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001817 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001818{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001819 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001820
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001821 list_for_each_entry(k, &hdev->long_term_keys, list)
1822 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001823 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001824 return k;
1825
1826 return NULL;
1827}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001828
/* Store (or update) the BR/EDR link key for @bdaddr.  @conn may be
 * NULL (security mode 3).  @new_key is non-zero when the key was just
 * delivered by the controller, in which case mgmt is notified and the
 * persistence policy is applied.  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type so the
	 * stored classification is not lost. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops. */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1881
/* Store (or update) an SMP key for @bdaddr/@addr_type.  Only STK and
 * LTK types are accepted; other types are silently ignored.  When
 * @new_key is set and the key is an LTK, mgmt is notified.
 * Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Reject anything that is neither an STK nor an LTK. */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if present. */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys (not short term keys) are reported. */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1918
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001919int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1920{
1921 struct link_key *key;
1922
1923 key = hci_find_link_key(hdev, bdaddr);
1924 if (!key)
1925 return -ENOENT;
1926
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001927 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001928
1929 list_del(&key->list);
1930 kfree(key);
1931
1932 return 0;
1933}
1934
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001935int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1936{
1937 struct smp_ltk *k, *tmp;
1938
1939 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1940 if (bacmp(bdaddr, &k->bdaddr))
1941 continue;
1942
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001943 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001944
1945 list_del(&k->list);
1946 kfree(k);
1947 }
1948
1949 return 0;
1950}
1951
/* HCI command timer function */
/* Fires when the controller fails to answer a command in time (runs in
 * timer/softirq context).  Logs the stuck opcode, then force-resets the
 * command credit so the cmd_work queue can make progress again. */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Pretend the credit came back so queued commands are not stuck
	 * forever behind the unanswered one. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1969
Szymon Janc2763eda2011-03-22 13:12:22 +01001970struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001971 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001972{
1973 struct oob_data *data;
1974
1975 list_for_each_entry(data, &hdev->remote_oob_data, list)
1976 if (bacmp(bdaddr, &data->bdaddr) == 0)
1977 return data;
1978
1979 return NULL;
1980}
1981
1982int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1983{
1984 struct oob_data *data;
1985
1986 data = hci_find_remote_oob_data(hdev, bdaddr);
1987 if (!data)
1988 return -ENOENT;
1989
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001990 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001991
1992 list_del(&data->list);
1993 kfree(data);
1994
1995 return 0;
1996}
1997
1998int hci_remote_oob_data_clear(struct hci_dev *hdev)
1999{
2000 struct oob_data *data, *n;
2001
2002 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2003 list_del(&data->list);
2004 kfree(data);
2005 }
2006
2007 return 0;
2008}
2009
2010int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002011 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002012{
2013 struct oob_data *data;
2014
2015 data = hci_find_remote_oob_data(hdev, bdaddr);
2016
2017 if (!data) {
2018 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2019 if (!data)
2020 return -ENOMEM;
2021
2022 bacpy(&data->bdaddr, bdaddr);
2023 list_add(&data->list, &hdev->remote_oob_data);
2024 }
2025
2026 memcpy(data->hash, hash, sizeof(data->hash));
2027 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2028
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002029 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002030
2031 return 0;
2032}
2033
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002034struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002035{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002036 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002037
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002038 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002039 if (bacmp(bdaddr, &b->bdaddr) == 0)
2040 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002041
2042 return NULL;
2043}
2044
2045int hci_blacklist_clear(struct hci_dev *hdev)
2046{
2047 struct list_head *p, *n;
2048
2049 list_for_each_safe(p, n, &hdev->blacklist) {
2050 struct bdaddr_list *b;
2051
2052 b = list_entry(p, struct bdaddr_list, list);
2053
2054 list_del(p);
2055 kfree(b);
2056 }
2057
2058 return 0;
2059}
2060
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002061int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002062{
2063 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002064
2065 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2066 return -EBADF;
2067
Antti Julku5e762442011-08-25 16:48:02 +03002068 if (hci_blacklist_lookup(hdev, bdaddr))
2069 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002070
2071 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002072 if (!entry)
2073 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002074
2075 bacpy(&entry->bdaddr, bdaddr);
2076
2077 list_add(&entry->list, &hdev->blacklist);
2078
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002079 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002080}
2081
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002082int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002083{
2084 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002085
Szymon Janc1ec918c2011-11-16 09:32:21 +01002086 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002087 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002088
2089 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002090 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002091 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002092
2093 list_del(&entry->list);
2094 kfree(entry);
2095
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002096 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002097}
2098
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002099static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002100{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002101 if (status) {
2102 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002103
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002104 hci_dev_lock(hdev);
2105 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2106 hci_dev_unlock(hdev);
2107 return;
2108 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002109}
2110
/* Completion callback after the LE scan disable request.  For LE-only
 * discovery this ends the discovery session; for interleaved discovery
 * it starts the BR/EDR inquiry phase.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery is finished once scanning stops. */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Follow up the LE phase with a classic inquiry. */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale results before the inquiry phase starts. */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2153
/* Deferred worker (hdev->le_scan_disable) that stops an LE scan by
 * sending LE Set Scan Enable (disable); the follow-up handling happens
 * in le_scan_disable_work_complete(). */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2174
/* Alloc HCI device */
/* Allocate and initialize a new struct hci_dev: default parameters,
 * locks, lists, work items, queues and the command timer.  Returns the
 * device (refcounted via its embedded struct device) or NULL on OOM.
 * The caller registers it with hci_register_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline packet types every 1.1+ controller supports. */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff interval defaults in baseband slots (0.625 ms units). */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Command timeout watchdog; armed when commands are sent. */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
2227EXPORT_SYMBOL(hci_alloc_dev);
2228
/* Free HCI device */
/* Drop the last reference; the actual memory is released by the
 * device's release callback once the refcount hits zero. */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
2235EXPORT_SYMBOL(hci_free_dev);
2236
/* Register HCI device */
/* Register an allocated hci_dev with the core: assign an index, create
 * the work queues, sysfs entries and rfkill hook, link it into the
 * global device list and kick off the power-on work.  Returns the new
 * device id (>= 0) or a negative errno; on failure all partially
 * acquired resources are released. */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered (single-threaded) queue for RX/TX/cmd processing. */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	/* Separate queue for potentially blocking request work. */
	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure leaves rfkill NULL
	 * but does not fail device registration. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);

	/* AMP controllers are never auto-powered-off. */
	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
2323EXPORT_SYMBOL(hci_register_dev);
2324
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so concurrent users can bail out
	 * early instead of starting new work against it.
	 */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index now: the IDA slot must only be released
	 * after the final hci_dev_put() below.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets left over from drivers. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Tell mgmt the index is gone, unless the device never completed
	 * its initial setup (mgmt never announced it in that case).
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush the persistent data stores under the device lock. */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Release the index only after the reference is dropped. */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2383
2384/* Suspend HCI device */
2385int hci_suspend_dev(struct hci_dev *hdev)
2386{
2387 hci_notify(hdev, HCI_DEV_SUSPEND);
2388 return 0;
2389}
2390EXPORT_SYMBOL(hci_suspend_dev);
2391
2392/* Resume HCI device */
2393int hci_resume_dev(struct hci_dev *hdev)
2394{
2395 hci_notify(hdev, HCI_DEV_RESUME);
2396 return 0;
2397}
2398EXPORT_SYMBOL(hci_resume_dev);
2399
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	/* Drivers stash their hci_dev pointer in skb->dev. */
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	/* Frames are only accepted while the device is up or still being
	 * initialized; otherwise the skb is consumed and dropped here.
	 */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Hand off to the RX work item; processing is asynchronous. */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2422
/* Incrementally reassemble one HCI packet from a driver byte stream.
 *
 * @type:  packet type (HCI_ACLDATA_PKT, HCI_SCODATA_PKT or HCI_EVENT_PKT)
 * @data:  next chunk of raw bytes from the driver
 * @count: number of bytes available at @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * Returns the number of bytes left unconsumed in @data (>= 0) or a
 * negative error. A completed packet is handed to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer sized for the
		 * largest packet of this type and expect the type-specific
		 * header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed to
		 * complete the current stage (header, then payload).
		 */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length out
		 * of it and extend the expected byte count accordingly.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					/* Claimed payload would overflow the
					 * buffer; drop the whole packet.
					 */
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2531
Marcel Holtmannef222012007-07-11 06:42:04 +02002532int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2533{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302534 int rem = 0;
2535
Marcel Holtmannef222012007-07-11 06:42:04 +02002536 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2537 return -EILSEQ;
2538
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002539 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002540 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302541 if (rem < 0)
2542 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002543
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302544 data += (count - rem);
2545 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002546 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002547
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302548 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002549}
2550EXPORT_SYMBOL(hci_recv_fragment);
2551
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (a packet-type indicator byte followed by the
 * packet itself) from a driver into the reassembly machinery, using the
 * dedicated stream reassembly slot. Returns bytes left unconsumed or a
 * negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continuation: reuse the type recorded when the
			 * frame was started.
			 */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2586
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback set on the global hci_cb_list,
 * protected by hci_cb_list_lock. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2600
/* Remove a previously registered upper-protocol callback set from the
 * global hci_cb_list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2612
/* Hand one outgoing frame to the driver, after mirroring it to the
 * monitor socket and (when in promiscuous mode) the raw HCI sockets.
 * skb->dev must point at the owning hci_dev; the skb is consumed.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2640
Johan Hedberg3119ae92013-03-05 20:37:44 +02002641void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2642{
2643 skb_queue_head_init(&req->cmd_q);
2644 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002645 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002646}
2647
/* Submit a built request: splice its commands onto the device command
 * queue and schedule the command work item. @complete is recorded on
 * the last queued command. Returns 0 on success, the builder error if
 * one was recorded, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Attach the completion callback to the last command of the
	 * request.
	 */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice the whole request onto the device queue atomically so
	 * its commands stay contiguous.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2679
/* Allocate and fill an skb carrying a single HCI command packet:
 * command header (opcode + length) followed by @plen bytes of @param.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);	/* wire format is little endian */
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2705
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	/* Queue for the command work item; transmission is asynchronous. */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		/* Record the failure; hci_req_run() purges the queue. */
		req->err = -ENOMEM;
		return;
	}

	/* The first command queued marks the start of the request. */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2761
/* Queue a command on a request with no specific event (event = 0);
 * convenience wrapper around hci_req_add_ev().
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2767
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002769void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770{
2771 struct hci_command_hdr *hdr;
2772
2773 if (!hdev->sent_cmd)
2774 return NULL;
2775
2776 hdr = (void *) hdev->sent_cmd->data;
2777
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002778 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 return NULL;
2780
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002781 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782
2783 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2784}
2785
2786/* Send ACL data */
2787static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2788{
2789 struct hci_acl_hdr *hdr;
2790 int len = skb->len;
2791
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002792 skb_push(skb, HCI_ACL_HDR_SIZE);
2793 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002794 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002795 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2796 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797}
2798
/* Add ACL header(s) to @skb (and to each fragment on its frag_list,
 * if any) and queue everything on @queue. For AMP controllers the
 * logical-link handle from the channel is used instead of the
 * connection handle.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* The head skb covers only its linear part; fragments on the
	 * frag_list are queued as separate packets below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2857
/* Queue ACL data on a channel and kick the TX work item. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header (handle + length) and prepend it. */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue on the connection and kick the TX work item. */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Pick the connection of @type that has queued data and the fewest
 * outstanding (unacknowledged) packets, and compute its fair-share
 * packet quota from the controller's free buffer count. Returns NULL
 * and quota 0 when no connection of this type has data pending.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-loaded eligible connection. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop once every connection of this type has been seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffers available for this link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Divide the buffers evenly, but always allow at least one
		 * packet per round.
		 */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2955
/* Handle a transmit timeout on links of @type: forcibly disconnect
 * every connection that still has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2976
/* Channel-level scheduler: across all connections of @type, find the
 * channel whose head-of-queue skb has the highest priority, breaking
 * ties by fewest outstanding packets on the owning connection. The
 * fair-share quota is computed from the free buffer count of the
 * selected channel's link type. Returns NULL when nothing is queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Lower-priority traffic never preempts the best
			 * priority found so far.
			 */
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New best priority: restart the fairness
				 * accounting at this level.
				 */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffers for the selected link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share across same-priority channels, at least one packet. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3058
/* Anti-starvation pass: for every connection of @type, reset the
 * per-round 'sent' counter on channels that transmitted, and promote
 * the head skb of channels that did not get to send to priority
 * HCI_PRIO_MAX - 1 so they win the next scheduling round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: just reset its
			 * counter and leave its priority alone.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3108
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003109static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3110{
3111 /* Calculate count of blocks used by this packet */
3112 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3113}
3114
/* Disconnect stalled ACL links when the controller has run out of
 * buffers (@cnt == 0) and nothing has completed within
 * HCI_ACL_TX_TIMEOUT. Skipped entirely for raw (HCI_RAW) devices.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125
/* Schedule ACL transmissions for packet-based flow control.
 *
 * While the controller advertises free ACL buffers (hdev->acl_cnt),
 * pick the most urgent channel via hci_chan_sent() and send up to
 * @quote frames from its queue.  A channel's turn ends early when the
 * head frame's priority drops below the priority the turn started
 * with.  If anything was sent, hci_prio_recalculate() is run so that
 * starved channels are promoted for the next round.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Same skb that was peeked above */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent this round: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3163
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003164static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003165{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003166 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003167 struct hci_chan *chan;
3168 struct sk_buff *skb;
3169 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003170 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003171
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003172 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003173
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003174 BT_DBG("%s", hdev->name);
3175
3176 if (hdev->dev_type == HCI_AMP)
3177 type = AMP_LINK;
3178 else
3179 type = ACL_LINK;
3180
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003181 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003182 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003183 u32 priority = (skb_peek(&chan->data_q))->priority;
3184 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3185 int blocks;
3186
3187 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003188 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003189
3190 /* Stop if priority has changed */
3191 if (skb->priority < priority)
3192 break;
3193
3194 skb = skb_dequeue(&chan->data_q);
3195
3196 blocks = __get_blocks(hdev, skb);
3197 if (blocks > hdev->block_cnt)
3198 return;
3199
3200 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003201 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003202
3203 hci_send_frame(skb);
3204 hdev->acl_last_tx = jiffies;
3205
3206 hdev->block_cnt -= blocks;
3207 quote -= blocks;
3208
3209 chan->sent += blocks;
3210 chan->conn->sent += blocks;
3211 }
3212 }
3213
3214 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003215 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003216}
3217
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003218static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003219{
3220 BT_DBG("%s", hdev->name);
3221
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003222 /* No ACL link over BR/EDR controller */
3223 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3224 return;
3225
3226 /* No AMP link over AMP controller */
3227 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003228 return;
3229
3230 switch (hdev->flow_ctl_mode) {
3231 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3232 hci_sched_acl_pkt(hdev);
3233 break;
3234
3235 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3236 hci_sched_acl_blk(hdev);
3237 break;
3238 }
3239}
3240
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003242static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243{
3244 struct hci_conn *conn;
3245 struct sk_buff *skb;
3246 int quote;
3247
3248 BT_DBG("%s", hdev->name);
3249
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003250 if (!hci_conn_num(hdev, SCO_LINK))
3251 return;
3252
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3254 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3255 BT_DBG("skb %p len %d", skb, skb->len);
3256 hci_send_frame(skb);
3257
3258 conn->sent++;
3259 if (conn->sent == ~0)
3260 conn->sent = 0;
3261 }
3262 }
3263}
3264
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003265static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003266{
3267 struct hci_conn *conn;
3268 struct sk_buff *skb;
3269 int quote;
3270
3271 BT_DBG("%s", hdev->name);
3272
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003273 if (!hci_conn_num(hdev, ESCO_LINK))
3274 return;
3275
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003276 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3277 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003278 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3279 BT_DBG("skb %p len %d", skb, skb->len);
3280 hci_send_frame(skb);
3281
3282 conn->sent++;
3283 if (conn->sent == ~0)
3284 conn->sent = 0;
3285 }
3286 }
3287}
3288
/* Schedule LE transmissions.
 *
 * LE either has a dedicated buffer pool (hdev->le_pkts/le_cnt) or, on
 * controllers without separate LE buffers, shares the ACL pool
 * (hdev->acl_cnt).  The scheduling loop mirrors hci_sched_acl_pkt():
 * per-channel quotas from hci_chan_sent(), early stop when the head
 * frame's priority drops, and a priority recalculation pass when
 * anything was sent.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE pool if the controller has one,
	 * otherwise borrow from the ACL pool.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Same skb that was peeked above */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent this round: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3339
/* TX work: run the per-link-type schedulers and then flush any raw
 * (unknown type) packets queued on hdev->raw_q.
 *
 * When a user channel owns the device (HCI_USER_CHANNEL) the kernel
 * schedulers are skipped entirely; only the raw queue is drained.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
3360
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003361/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362
3363/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003364static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365{
3366 struct hci_acl_hdr *hdr = (void *) skb->data;
3367 struct hci_conn *conn;
3368 __u16 handle, flags;
3369
3370 skb_pull(skb, HCI_ACL_HDR_SIZE);
3371
3372 handle = __le16_to_cpu(hdr->handle);
3373 flags = hci_flags(handle);
3374 handle = hci_handle(handle);
3375
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003376 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003377 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378
3379 hdev->stat.acl_rx++;
3380
3381 hci_dev_lock(hdev);
3382 conn = hci_conn_hash_lookup_handle(hdev, handle);
3383 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003384
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003386 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003387
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003389 l2cap_recv_acldata(conn, skb, flags);
3390 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003391 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003392 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003393 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 }
3395
3396 kfree_skb(skb);
3397}
3398
3399/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003400static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401{
3402 struct hci_sco_hdr *hdr = (void *) skb->data;
3403 struct hci_conn *conn;
3404 __u16 handle;
3405
3406 skb_pull(skb, HCI_SCO_HDR_SIZE);
3407
3408 handle = __le16_to_cpu(hdr->handle);
3409
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003410 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411
3412 hdev->stat.sco_rx++;
3413
3414 hci_dev_lock(hdev);
3415 conn = hci_conn_hash_lookup_handle(hdev, handle);
3416 hci_dev_unlock(hdev);
3417
3418 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003420 sco_recv_scodata(conn, skb);
3421 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003423 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003424 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425 }
3426
3427 kfree_skb(skb);
3428}
3429
Johan Hedberg9238f362013-03-05 20:37:48 +02003430static bool hci_req_is_complete(struct hci_dev *hdev)
3431{
3432 struct sk_buff *skb;
3433
3434 skb = skb_peek(&hdev->cmd_q);
3435 if (!skb)
3436 return true;
3437
3438 return bt_cb(skb)->req.start;
3439}
3440
Johan Hedberg42c6b122013-03-05 20:37:49 +02003441static void hci_resend_last(struct hci_dev *hdev)
3442{
3443 struct hci_command_hdr *sent;
3444 struct sk_buff *skb;
3445 u16 opcode;
3446
3447 if (!hdev->sent_cmd)
3448 return;
3449
3450 sent = (void *) hdev->sent_cmd->data;
3451 opcode = __le16_to_cpu(sent->opcode);
3452 if (opcode == HCI_OP_RESET)
3453 return;
3454
3455 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3456 if (!skb)
3457 return;
3458
3459 skb_queue_head(&hdev->cmd_q, skb);
3460 queue_work(hdev->workqueue, &hdev->cmd_work);
3461}
3462
/* Handle completion of the command with @opcode on behalf of the
 * request framework.
 *
 * Matches the completed opcode against hdev->sent_cmd, resends the
 * last command if a CSR-style spontaneous reset is detected during
 * init, and — when the request has finished (last command, or a
 * command failed) — flushes the remaining queued commands of the
 * request and invokes its completion callback exactly once.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request; stop at
	 * the first command that starts the next request and put it back.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3528
/* RX work: drain hdev->rx_q and dispatch each received packet.
 *
 * Every packet is first copied to the monitor socket and, in
 * promiscuous mode, to the HCI sockets.  Packets are dropped entirely
 * in raw or user-channel mode; data packets are also dropped while
 * HCI_INIT is set.  Everything else is dispatched by packet type to
 * the event, ACL-data or SCO-data handlers, which consume the skb.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
3584
/* Command work: transmit the next queued HCI command when the
 * controller has command credit (hdev->cmd_cnt).
 *
 * A clone of the outgoing command is kept in hdev->sent_cmd so the
 * completion event can be matched against it, and cmd_timer is armed
 * to catch controllers that never respond.  After HCI_Reset the timer
 * is stopped instead.  If cloning fails the command is put back and
 * the work is re-queued.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous reference command, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003616
Andre Guedes31f79562012-04-24 21:02:53 -03003617u8 bdaddr_to_le(u8 bdaddr_type)
3618{
3619 switch (bdaddr_type) {
3620 case BDADDR_LE_PUBLIC:
3621 return ADDR_LE_DEV_PUBLIC;
3622
3623 default:
3624 /* Fallback to LE Random address type */
3625 return ADDR_LE_DEV_RANDOM;
3626 }
3627}