/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

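/* Consume the event skb stashed in hdev->recv_evt. If a specific
 * event code was requested, the skb is returned only when it matches;
 * otherwise it must be a Command Complete event for the given opcode.
 * On any mismatch the skb is freed and ERR_PTR(-ENODATA) is returned.
 */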
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

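/* Send a single HCI command and sleep until the matching event
 * arrives. Returns the event skb (see hci_get_cmd_complete() above)
 * or an ERR_PTR on failure. Callers are expected to serialize
 * requests, typically by holding hci_req_lock.
 */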
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

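/* Choose the inquiry mode: 0x02 = Inquiry Result with Extended Inquiry
 * Response, 0x01 = Inquiry Result with RSSI, 0x00 = standard mode.
 * The manufacturer/revision checks below appear to whitelist
 * controllers that handle RSSI inquiry results despite not setting the
 * corresponding feature bit (manufacturer 31 is AVM Berlin, per the
 * comment in hci_init2_req()).
 */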
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

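/* Stage two of the init sequence: common BR/EDR and LE host setup,
 * driven by the capabilities reported during stage one.
 */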
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

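/* Run the init stages in sequence. Each stage is a synchronous
 * request, so the commands sent by a later stage can depend on the
 * responses (features, supported commands) collected by earlier ones.
 */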
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

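/* Re-position ie within the resolve list after its RSSI changed.
 * Entries with a name request already pending stay at the front; the
 * rest are kept ordered by ascending abs(rssi), i.e. strongest signal
 * first for the usual negative dBm values.
 */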
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

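/* Add a fresh inquiry result to the cache, or refresh an existing
 * entry. Returns true if the remote name is already known (no name
 * resolution needed), false otherwise.
 */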
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

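/* Copy at most num cached entries into buf as an array of
 * struct inquiry_info and return how many entries were copied.
 */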
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

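/* Action function for wait_on_bit(): yield the CPU and report whether
 * the sleep was interrupted by a signal.
 */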
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

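/* Assemble the LE advertising data (flags, TX power and the local
 * name, shortened if necessary) into ptr, which must provide at least
 * HCI_MAX_AD_LENGTH bytes. Returns the number of bytes written.
 */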
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

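/* Bring the device up: open the transport, run the vendor setup and
 * the HCI init stages, and notify listeners. On failure the queued
 * work is flushed and the transport is closed again.
 */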
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

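/* Tear the device down: cancel pending work and timers, flush the
 * inquiry cache and connection hash, optionally reset the controller
 * and finally close the transport.
 */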
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320static int hci_dev_do_close(struct hci_dev *hdev)
1321{
1322 BT_DBG("%s %p", hdev->name, hdev);
1323
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001324 cancel_delayed_work(&hdev->power_off);
1325
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 hci_req_cancel(hdev, ENODEV);
1327 hci_req_lock(hdev);
1328
1329 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001330 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 hci_req_unlock(hdev);
1332 return 0;
1333 }
1334
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001335 /* Flush RX and TX works */
1336 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001337 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001339 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001340 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001341 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001342 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001343 }
1344
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001345 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001346 cancel_delayed_work(&hdev->service_cache);
1347
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001348 cancel_delayed_work_sync(&hdev->le_scan_disable);
1349
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001350 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001351 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001353 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354
1355 hci_notify(hdev, HCI_DEV_DOWN);
1356
1357 if (hdev->flush)
1358 hdev->flush(hdev);
1359
1360 /* Reset device */
1361 skb_queue_purge(&hdev->cmd_q);
1362 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001363 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001364 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001366 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 clear_bit(HCI_INIT, &hdev->flags);
1368 }
1369
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001370 /* flush cmd work */
1371 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372
1373 /* Drop queues */
1374 skb_queue_purge(&hdev->rx_q);
1375 skb_queue_purge(&hdev->cmd_q);
1376 skb_queue_purge(&hdev->raw_q);
1377
1378 /* Drop last sent command */
1379 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001380 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 kfree_skb(hdev->sent_cmd);
1382 hdev->sent_cmd = NULL;
1383 }
1384
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001385 kfree_skb(hdev->recv_evt);
1386 hdev->recv_evt = NULL;
1387
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 /* After this point our queues are empty
1389 * and no tasks are scheduled. */
1390 hdev->close(hdev);
1391
Johan Hedberg35b973c2013-03-15 17:06:59 -05001392 /* Clear flags */
1393 hdev->flags = 0;
1394 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1395
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001396 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1397 if (hdev->dev_type == HCI_BREDR) {
1398 hci_dev_lock(hdev);
1399 mgmt_powered(hdev, 0);
1400 hci_dev_unlock(hdev);
1401 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001402 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001403
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001404 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001405 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001406
Johan Hedberge59fda82012-02-22 18:11:53 +02001407 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001408 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001409
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 hci_req_unlock(hdev);
1411
1412 hci_dev_put(hdev);
1413 return 0;
1414}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
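/* Illustrative note (hedged, not part of the original file): for
 * HCISETACLMTU and HCISETSCOMTU, user space packs two 16-bit values into
 * the 32-bit dr.dev_opt field -- the pointer arithmetic above reads the
 * packet count from the first halfword and the MTU from the second, so on
 * a little-endian host a caller with an open HCI control socket fd could
 * build the request like:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = (1021 << 16) | 8;	// ACL MTU 1021, 8 packets
 *	ioctl(fd, HCISETACLMTU, &dr);
 *
 * The values and fd here are hypothetical; the packing follows directly
 * from the halfword accesses in the switch above.
 */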

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
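/* Illustrative walk-through (hedged, not part of the original file): a
 * changed combination key (HCI_LK_CHANGED_COMBINATION) reported when no
 * previous key exists (old_key_type == 0xff) yields false -- a "changed"
 * key with nothing to change is not trusted. Conversely, when both sides
 * asked for dedicated bonding (auth_type and remote_auth of 0x02 or 0x03,
 * per the checks above), the key is treated as persistent and survives
 * power cycles; the caller then leaves conn->flush_key unset.
 */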

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001888
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001889int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001890 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001891{
1892 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301893 u8 old_key_type;
1894 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001895
1896 old_key = hci_find_link_key(hdev, bdaddr);
1897 if (old_key) {
1898 old_key_type = old_key->type;
1899 key = old_key;
1900 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001901 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001902 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1903 if (!key)
1904 return -ENOMEM;
1905 list_add(&key->list, &hdev->link_keys);
1906 }
1907
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001908 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001909
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001910 /* Some buggy controller combinations generate a changed
1911 * combination key for legacy pairing even when there's no
1912 * previous key */
1913 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001914 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001915 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001916 if (conn)
1917 conn->key_type = type;
1918 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001919
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001920 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001921 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001922 key->pin_len = pin_len;
1923
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001924 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001925 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001926 else
1927 key->type = type;
1928
Johan Hedberg4df378a2011-04-28 11:29:03 -07001929 if (!new_key)
1930 return 0;
1931
1932 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1933
Johan Hedberg744cf192011-11-08 20:40:14 +02001934 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001935
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301936 if (conn)
1937 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001938
1939 return 0;
1940}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
2158
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002159static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002160{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002161 if (status) {
2162 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002163
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002164 hci_dev_lock(hdev);
2165 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2166 hci_dev_unlock(hdev);
2167 return;
2168 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002169}
2170
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002171static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002172{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002173 /* General inquiry access code (GIAC) */
2174 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2175 struct hci_request req;
2176 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002177 int err;
2178
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002179 if (status) {
2180 BT_ERR("Failed to disable LE scanning: status %d", status);
2181 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002182 }
2183
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002184 switch (hdev->discovery.type) {
2185 case DISCOV_TYPE_LE:
2186 hci_dev_lock(hdev);
2187 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2188 hci_dev_unlock(hdev);
2189 break;
2190
2191 case DISCOV_TYPE_INTERLEAVED:
2192 hci_req_init(&req, hdev);
2193
2194 memset(&cp, 0, sizeof(cp));
2195 memcpy(&cp.lap, lap, sizeof(cp.lap));
2196 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2197 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2198
2199 hci_dev_lock(hdev);
2200
2201 hci_inquiry_cache_flush(hdev);
2202
2203 err = hci_req_run(&req, inquiry_complete);
2204 if (err) {
2205 BT_ERR("Inquiry request failed: err %d", err);
2206 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2207 }
2208
2209 hci_dev_unlock(hdev);
2210 break;
2211 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002212}
2213
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002214static void le_scan_disable_work(struct work_struct *work)
2215{
2216 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002217 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002218 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002219 struct hci_request req;
2220 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002221
2222 BT_DBG("%s", hdev->name);
2223
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002224 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002225
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002226 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002227 cp.enable = LE_SCAN_DISABLE;
2228 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002229
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002230 err = hci_req_run(&req, le_scan_disable_work_complete);
2231 if (err)
2232 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002233}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
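/* Illustrative sketch (hedged, not part of the original file): a minimal
 * transport driver registers with the core roughly like this. my_open,
 * my_close and my_send are hypothetical driver callbacks; only open and
 * close are mandatory (see the check at the top of hci_register_dev):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;		// or HCI_UART, HCI_SDIO, ...
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * On success the core assigns the hciN name, creates the workqueues and
 * queues the power-on work that runs the controller init sequence.
 */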

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
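/* Illustrative sketch (hedged, not part of the original file): a transport
 * driver hands completed packets to the core roughly like so, where hdev
 * is its registered controller and data/len is one complete event packet
 * pulled off the wire:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), data, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;	// hci_recv_frame reads hdev from here
 *	return hci_recv_frame(skb);
 *
 * Drivers that only see a raw byte stream use hci_recv_fragment() or
 * hci_recv_stream_fragment() below instead.
 */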

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
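/* Illustrative sketch (hedged, not part of the original file): a UART-style
 * driver that already knows the packet type of the bytes it received can
 * feed them in arbitrary chunks; hci_reassembly() buffers partial headers
 * and payloads per packet type until each full frame is queued through
 * hci_recv_frame():
 *
 *	// buf holds n bytes of an ACL packet, possibly a partial fragment
 *	int err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, n);
 *	if (err < 0)
 *		; // -EILSEQ for a bad type, -ENOMEM on allocation failure
 *
 * On success the whole buffer has been consumed, with any trailing bytes
 * kept in hdev->reassembly[] until the next call completes the frame.
 */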

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
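/* Illustrative sketch (hedged, not part of the original file): the
 * init/add/run pattern mirrors le_scan_disable_work() above. A caller
 * batches commands and gets one completion callback for the whole request:
 *
 *	static void my_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *	if (hci_req_run(&req, my_complete))
 *		; // -ENODATA for an empty request, or the builder's error
 *
 * my_complete and the scan value are hypothetical; any queued opcode works,
 * and the callback is attached only to the last command in the batch.
 */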

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795
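/* Illustrative sketch (added for this edit, compiled out; not part of the
 * original file): sending a single stand-alone command via hci_send_cmd().
 * HCI_OP_RESET takes no parameters, so plen is 0 and param is NULL; the
 * reply arrives later through the event path (hci_event_packet). The
 * wrapper name example_reset is hypothetical.
 */
#if 0
static int example_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
#endif
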
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
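/* Illustrative sketch (added for this edit, compiled out; not part of the
 * original file): hci_req_add_ev() is for commands that do not finish with
 * Command Complete but with some other event. The vendor opcode 0xfc01,
 * the event code 0xff and the parameter below are made up purely for
 * illustration; the point is that bt_cb(skb)->req.event stored above lets
 * the event code match that reply to the pending request.
 */
#if 0
static void example_vendor_cmd(struct hci_request *req)
{
	u8 param = 0x01;	/* hypothetical vendor parameter */

	hci_req_add_ev(req, 0xfc01, sizeof(param), &param, 0xff);
}
#endif
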
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
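/* Illustrative sketch (added for this edit, compiled out; not part of the
 * original file): a command complete handler using hci_sent_cmd_data() to
 * recover the parameters it originally sent, mirroring the pattern used
 * throughout hci_event.c. The function name is hypothetical.
 */
#if 0
static void example_cc_write_scan_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	if (!status)
		BT_DBG("%s scan param 0x%2.2x", hdev->name, *((__u8 *) sent));
}
#endif
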
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
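/* Worked example (added for illustration): the 16-bit "handle" field of the
 * ACL header carries the 12-bit connection handle in its low bits and the
 * packet-boundary/broadcast flag bits above it, packed by hci_handle_pack()
 * from hci.h. For handle 0x0042 with ACL_START (0x02):
 *
 *	hci_handle_pack(0x0042, ACL_START) == 0x0042 | (0x02 << 12)
 *					   == 0x2042
 *
 * hci_handle() and hci_flags() invert this on the receive side (see
 * hci_acldata_packet below).
 */
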
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
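/* Worked example (added for illustration): when an skb arrives with a
 * frag_list, only the head fragment keeps the caller's boundary flag
 * (typically ACL_START); hci_queue_acl() above rewrites the flags for
 * every continuation fragment:
 *
 *	flags &= ~ACL_START;	flags |= ACL_CONT;
 *
 * so a three-fragment L2CAP PDU goes out as ACL_START, ACL_CONT, ACL_CONT,
 * which is how the controller reassembles it.
 */
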
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
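/* Worked example (added for illustration): hci_low_sent() picks the
 * connection of the given type with the fewest outstanding packets and
 * splits the free controller buffers evenly across the busy connections.
 * With hdev->acl_cnt == 8 free ACL buffers and num == 3 busy ACL
 * connections:
 *
 *	q = 8 / 3 = 2, so *quote = 2
 *
 * and when cnt < num, the "q ? q : 1" fallback still grants one packet,
 * so a connection is never starved outright.
 */
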
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
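/* Worked example (added for illustration): block-based flow control counts
 * link-layer buffer blocks rather than packets. With hdev->block_len == 64
 * and a 300-byte skb (HCI_ACL_HDR_SIZE == 4):
 *
 *	DIV_ROUND_UP(300 - 4, 64) = DIV_ROUND_UP(296, 64) = 5 blocks
 *
 * hci_sched_acl_blk() below charges these blocks against hdev->block_cnt
 * instead of decrementing a per-packet budget.
 */
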
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}
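
/* Illustrative sketch (added for this edit, compiled out; not part of the
 * original file): bdaddr_to_le() maps the exported BDADDR_LE_* address
 * types (as used by the management interface) onto the internal
 * ADDR_LE_DEV_* values. The wrapper name is hypothetical.
 */
#if 0
static u8 example_addr_type(void)
{
	/* BDADDR_LE_PUBLIC -> ADDR_LE_DEV_PUBLIC */
	return bdaddr_to_le(BDADDR_LE_PUBLIC);
}
#endif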