blob: 967739c0285b75411e120c6733bd26f2961a8adb [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
/* Work handlers for the RX, command and TX queues (defined later in
 * this file); declared here so the device setup code can reference them.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Forward a device event to the HCI socket layer so listening
 * sockets (e.g. monitors) are informed about device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Completion callback for synchronous requests: record the result and
 * wake the thread sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	/* Only hand over a result if a request is actually pending;
	 * otherwise a stale completion would clobber req_result.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
70
/* Abort a pending synchronous request with error @err and wake the
 * waiter; the waiter maps HCI_REQ_CANCELED back to -err.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
81
/* Take ownership of the last received event (hdev->recv_evt) and return
 * it if it matches what the caller waited for.
 *
 * If @event is non-zero the event code must equal @event; otherwise the
 * event must be a Command Complete whose opcode equals @opcode.
 *
 * Returns the skb (ownership transferred to the caller, header pulled)
 * or ERR_PTR(-ENODATA) if nothing suitable was received.  On any
 * mismatch the stored skb is freed.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach recv_evt under the device lock so the RX path cannot
	 * hand us a half-stored event.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event code rather than a
	 * Command Complete for @opcode.
	 */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
/* Send a single HCI command and wait (up to @timeout jiffies) for its
 * completion, returning the matching event skb.
 *
 * @opcode/@plen/@param: command to send
 * @event: if non-zero, wait for this event code instead of a
 *         Command Complete for @opcode
 *
 * Returns the event skb on success or an ERR_PTR: -EINTR if interrupted
 * by a signal, the request error otherwise.  Caller must hold the
 * request lock (see __hci_cmd_sync callers).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Must be set before hci_req_run() so the completion callback
	 * sees a pending request.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case of
 * waiting for the Command Complete of @opcode (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * @func builds the request (may add zero or more commands); this
 * function runs it and sleeps up to @timeout jiffies for the result.
 * Caller must hold the request lock (see hci_req_sync()).
 *
 * Returns 0 on success (including an empty request), -EINTR if a
 * signal arrived, -ETIMEDOUT if no completion came, or the translated
 * request error.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Mark pending before running so the completion callback
	 * delivers its result (see hci_req_sync_complete()).
	 */
	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the request never completed */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
/* Locked variant of __hci_req_sync(): rejects requests while the device
 * is down and serializes all synchronous requests via the request lock.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
279
/* Request builder: queue an HCI Reset and flag the device as resetting
 * (HCI_RESET is cleared when the reset completes elsewhere).
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
288
/* Stage-1 init for BR/EDR controllers: select packet-based flow control
 * and queue the basic identity queries.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
302
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific capability queries.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
325
/* Stage-1 init request: optional reset, then dispatch to the
 * BR/EDR- or AMP-specific init depending on the controller type.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless the driver asked to skip it (quirk means the
	 * device is reset on close instead).
	 */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
350
/* Stage-2 BR/EDR setup: query buffer sizes and identity, clear event
 * filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters (commands only exist from 1.2 on) */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
382
/* Stage-2 LE setup: query LE capabilities and, for LE-only
 * controllers, mark LE as implicitly enabled.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
406
407static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
408{
409 if (lmp_ext_inq_capable(hdev))
410 return 0x02;
411
412 if (lmp_inq_rssi_capable(hdev))
413 return 0x01;
414
415 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
416 hdev->lmp_subver == 0x0757)
417 return 0x01;
418
419 if (hdev->manufacturer == 15) {
420 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
421 return 0x01;
422 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
423 return 0x01;
424 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
425 return 0x01;
426 }
427
428 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
429 hdev->lmp_subver == 0x1805)
430 return 0x01;
431
432 return 0x00;
433}
434
/* Queue a Write Inquiry Mode command with the best mode the
 * controller supports (see hci_get_inquiry_mode()).
 */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
443
/* Build the Set Event Mask (and, if LE capable, LE Set Event Mask)
 * commands, enabling exactly the events the controller's feature set
 * can generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* Reuse the buffer for the LE event mask */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
524
/* Stage-2 init request: per-transport setup, event mask, and the
 * feature-conditional configuration commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear the cached EIR data and
			 * send an all-zero EIR to the controller.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
580
/* Queue a Write Default Link Policy command enabling every policy
 * (role switch, hold, sniff, park) the controller supports.
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
599
/* Sync the host's LE-enabled flag to the controller via
 * Write LE Host Supported, but only when it actually changed.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command if the setting differs from what the
	 * controller currently reports.
	 */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
620
/* Build the Set Event Mask Page 2 command, enabling the Connectionless
 * Slave Broadcast events matching the supported CSB roles
 * (features page 2, byte 0: bit 0 = master, bit 1 = slave).
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
648
/* Stage-3 init request: stored-link-key cleanup, link policy, LE
 * support flags and extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy is supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
689
/* Stage-4 init request: commands that depend on results from stage 3
 * (supported-commands and features page 2).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
702
/* Run the staged controller initialization synchronously.  Each stage
 * must complete before the next starts because later stages consult
 * capabilities reported by earlier ones.  Returns 0 or a negative
 * error from the first failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
728
/* Request builder: write the scan enable setting (@opt carries the
 * inquiry/page scan bits).
 */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
738
/* Request builder: write the authentication enable setting
 * (@opt carries the on/off value).
 */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
748
/* Request builder: write the encryption mode setting
 * (@opt carries the on/off value).
 */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
758
Johan Hedberg42c6b122013-03-05 20:37:49 +0200759static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200760{
761 __le16 policy = cpu_to_le16(opt);
762
Johan Hedberg42c6b122013-03-05 20:37:49 +0200763 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200764
765 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200766 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200767}
768
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900769/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770 * Device is held on return. */
771struct hci_dev *hci_dev_get(int index)
772{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200773 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774
775 BT_DBG("%d", index);
776
777 if (index < 0)
778 return NULL;
779
780 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200781 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782 if (d->id == index) {
783 hdev = hci_dev_hold(d);
784 break;
785 }
786 }
787 read_unlock(&hci_dev_list_lock);
788 return hdev;
789}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790
791/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200792
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200793bool hci_discovery_active(struct hci_dev *hdev)
794{
795 struct discovery_state *discov = &hdev->discovery;
796
Andre Guedes6fbe1952012-02-03 17:47:58 -0300797 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300798 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300799 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200800 return true;
801
Andre Guedes6fbe1952012-02-03 17:47:58 -0300802 default:
803 return false;
804 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200805}
806
Johan Hedbergff9ef572012-01-04 14:23:45 +0200807void hci_discovery_set_state(struct hci_dev *hdev, int state)
808{
809 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
810
811 if (hdev->discovery.state == state)
812 return;
813
814 switch (state) {
815 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300816 if (hdev->discovery.state != DISCOVERY_STARTING)
817 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200818 break;
819 case DISCOVERY_STARTING:
820 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300821 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200822 mgmt_discovering(hdev, 1);
823 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200824 case DISCOVERY_RESOLVING:
825 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200826 case DISCOVERY_STOPPING:
827 break;
828 }
829
830 hdev->discovery.state = state;
831}
832
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300833void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834{
Johan Hedberg30883512012-01-04 14:16:21 +0200835 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200836 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837
Johan Hedberg561aafb2012-01-04 13:31:59 +0200838 list_for_each_entry_safe(p, n, &cache->all, all) {
839 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200840 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200842
843 INIT_LIST_HEAD(&cache->unknown);
844 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845}
846
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300847struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
848 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849{
Johan Hedberg30883512012-01-04 14:16:21 +0200850 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851 struct inquiry_entry *e;
852
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300853 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854
Johan Hedberg561aafb2012-01-04 13:31:59 +0200855 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200857 return e;
858 }
859
860 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861}
862
Johan Hedberg561aafb2012-01-04 13:31:59 +0200863struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300864 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200865{
Johan Hedberg30883512012-01-04 14:16:21 +0200866 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200867 struct inquiry_entry *e;
868
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300869 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200870
871 list_for_each_entry(e, &cache->unknown, list) {
872 if (!bacmp(&e->data.bdaddr, bdaddr))
873 return e;
874 }
875
876 return NULL;
877}
878
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200879struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300880 bdaddr_t *bdaddr,
881 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200882{
883 struct discovery_state *cache = &hdev->discovery;
884 struct inquiry_entry *e;
885
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300886 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200887
888 list_for_each_entry(e, &cache->resolve, list) {
889 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
890 return e;
891 if (!bacmp(&e->data.bdaddr, bdaddr))
892 return e;
893 }
894
895 return NULL;
896}
897
Johan Hedberga3d4e202012-01-09 00:53:02 +0200898void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300899 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200900{
901 struct discovery_state *cache = &hdev->discovery;
902 struct list_head *pos = &cache->resolve;
903 struct inquiry_entry *p;
904
905 list_del(&ie->list);
906
907 list_for_each_entry(p, &cache->resolve, list) {
908 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300909 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200910 break;
911 pos = &p->list;
912 }
913
914 list_add(&ie->list, pos);
915}
916
Johan Hedberg31754052012-01-04 13:39:52 +0200917bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300918 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919{
Johan Hedberg30883512012-01-04 14:16:21 +0200920 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200921 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300923 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924
Szymon Janc2b2fec42012-11-20 11:38:54 +0100925 hci_remove_remote_oob_data(hdev, &data->bdaddr);
926
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200927 if (ssp)
928 *ssp = data->ssp_mode;
929
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200930 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200931 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200932 if (ie->data.ssp_mode && ssp)
933 *ssp = true;
934
Johan Hedberga3d4e202012-01-09 00:53:02 +0200935 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300936 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +0200937 ie->data.rssi = data->rssi;
938 hci_inquiry_cache_update_resolve(hdev, ie);
939 }
940
Johan Hedberg561aafb2012-01-04 13:31:59 +0200941 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200942 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200943
Johan Hedberg561aafb2012-01-04 13:31:59 +0200944 /* Entry not in the cache. Add new one. */
945 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
946 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200947 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200948
949 list_add(&ie->all, &cache->all);
950
951 if (name_known) {
952 ie->name_state = NAME_KNOWN;
953 } else {
954 ie->name_state = NAME_NOT_KNOWN;
955 list_add(&ie->list, &cache->unknown);
956 }
957
958update:
959 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300960 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +0200961 ie->name_state = NAME_KNOWN;
962 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963 }
964
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200965 memcpy(&ie->data, data, sizeof(*data));
966 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200968
969 if (ie->name_state == NAME_NOT_KNOWN)
970 return false;
971
972 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973}
974
975static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
976{
Johan Hedberg30883512012-01-04 14:16:21 +0200977 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 struct inquiry_info *info = (struct inquiry_info *) buf;
979 struct inquiry_entry *e;
980 int copied = 0;
981
Johan Hedberg561aafb2012-01-04 13:31:59 +0200982 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200984
985 if (copied >= num)
986 break;
987
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988 bacpy(&info->bdaddr, &data->bdaddr);
989 info->pscan_rep_mode = data->pscan_rep_mode;
990 info->pscan_period_mode = data->pscan_period_mode;
991 info->pscan_mode = data->pscan_mode;
992 memcpy(info->dev_class, data->dev_class, 3);
993 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200994
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200996 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997 }
998
999 BT_DBG("cache %p, copied %d", cache, copied);
1000 return copied;
1001}
1002
Johan Hedberg42c6b122013-03-05 20:37:49 +02001003static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004{
1005 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001006 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001007 struct hci_cp_inquiry cp;
1008
1009 BT_DBG("%s", hdev->name);
1010
1011 if (test_bit(HCI_INQUIRY, &hdev->flags))
1012 return;
1013
1014 /* Start Inquiry */
1015 memcpy(&cp.lap, &ir->lap, 3);
1016 cp.length = ir->length;
1017 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001018 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001019}
1020
Andre Guedes3e13fa12013-03-27 20:04:56 -03001021static int wait_inquiry(void *word)
1022{
1023 schedule();
1024 return signal_pending(current);
1025}
1026
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027int hci_inquiry(void __user *arg)
1028{
1029 __u8 __user *ptr = arg;
1030 struct hci_inquiry_req ir;
1031 struct hci_dev *hdev;
1032 int err = 0, do_inquiry = 0, max_rsp;
1033 long timeo;
1034 __u8 *buf;
1035
1036 if (copy_from_user(&ir, ptr, sizeof(ir)))
1037 return -EFAULT;
1038
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001039 hdev = hci_dev_get(ir.dev_id);
1040 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041 return -ENODEV;
1042
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001043 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1044 err = -EBUSY;
1045 goto done;
1046 }
1047
Johan Hedberg56f87902013-10-02 13:43:13 +03001048 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1049 err = -EOPNOTSUPP;
1050 goto done;
1051 }
1052
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001053 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001054 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001055 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001056 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057 do_inquiry = 1;
1058 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001059 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060
Marcel Holtmann04837f62006-07-03 10:02:33 +02001061 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001062
1063 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001064 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1065 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001066 if (err < 0)
1067 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001068
1069 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1070 * cleared). If it is interrupted by a signal, return -EINTR.
1071 */
1072 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1073 TASK_INTERRUPTIBLE))
1074 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001075 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001077 /* for unlimited number of responses we will use buffer with
1078 * 255 entries
1079 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1081
1082 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1083 * copy it to the user space.
1084 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001085 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001086 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087 err = -ENOMEM;
1088 goto done;
1089 }
1090
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001091 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001093 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094
1095 BT_DBG("num_rsp %d", ir.num_rsp);
1096
1097 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1098 ptr += sizeof(ir);
1099 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001100 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001102 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103 err = -EFAULT;
1104
1105 kfree(buf);
1106
1107done:
1108 hci_dev_put(hdev);
1109 return err;
1110}
1111
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001112static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1113{
1114 u8 ad_len = 0, flags = 0;
1115 size_t name_len;
1116
Johan Hedbergf3d3444a2013-10-05 12:01:04 +02001117 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001118 flags |= LE_AD_GENERAL;
1119
Johan Hedberg11802b22013-10-02 16:02:24 +03001120 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1121 if (lmp_le_br_capable(hdev))
1122 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1123 if (lmp_host_le_br_capable(hdev))
1124 flags |= LE_AD_SIM_LE_BREDR_HOST;
1125 } else {
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001126 flags |= LE_AD_NO_BREDR;
Johan Hedberg11802b22013-10-02 16:02:24 +03001127 }
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001128
1129 if (flags) {
1130 BT_DBG("adv flags 0x%02x", flags);
1131
1132 ptr[0] = 2;
1133 ptr[1] = EIR_FLAGS;
1134 ptr[2] = flags;
1135
1136 ad_len += 3;
1137 ptr += 3;
1138 }
1139
1140 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1141 ptr[0] = 2;
1142 ptr[1] = EIR_TX_POWER;
1143 ptr[2] = (u8) hdev->adv_tx_power;
1144
1145 ad_len += 3;
1146 ptr += 3;
1147 }
1148
1149 name_len = strlen(hdev->dev_name);
1150 if (name_len > 0) {
1151 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1152
1153 if (name_len > max_len) {
1154 name_len = max_len;
1155 ptr[1] = EIR_NAME_SHORT;
1156 } else
1157 ptr[1] = EIR_NAME_COMPLETE;
1158
1159 ptr[0] = name_len + 1;
1160
1161 memcpy(ptr + 2, hdev->dev_name, name_len);
1162
1163 ad_len += (name_len + 2);
1164 ptr += (name_len + 2);
1165 }
1166
1167 return ad_len;
1168}
1169
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001170void hci_update_ad(struct hci_request *req)
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001171{
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001172 struct hci_dev *hdev = req->hdev;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001173 struct hci_cp_le_set_adv_data cp;
1174 u8 len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001175
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001176 if (!lmp_le_capable(hdev))
1177 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001178
1179 memset(&cp, 0, sizeof(cp));
1180
1181 len = create_ad(hdev, cp.data);
1182
1183 if (hdev->adv_data_len == len &&
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001184 memcmp(cp.data, hdev->adv_data, len) == 0)
1185 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001186
1187 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1188 hdev->adv_data_len = len;
1189
1190 cp.length = len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001191
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001192 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001193}
1194
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001195static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197 int ret = 0;
1198
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 BT_DBG("%s %p", hdev->name, hdev);
1200
1201 hci_req_lock(hdev);
1202
Johan Hovold94324962012-03-15 14:48:41 +01001203 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1204 ret = -ENODEV;
1205 goto done;
1206 }
1207
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001208 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1209 /* Check for rfkill but allow the HCI setup stage to
1210 * proceed (which in itself doesn't cause any RF activity).
1211 */
1212 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1213 ret = -ERFKILL;
1214 goto done;
1215 }
1216
1217 /* Check for valid public address or a configured static
1218 * random adddress, but let the HCI setup proceed to
1219 * be able to determine if there is a public address
1220 * or not.
1221 *
1222 * This check is only valid for BR/EDR controllers
1223 * since AMP controllers do not have an address.
1224 */
1225 if (hdev->dev_type == HCI_BREDR &&
1226 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1227 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1228 ret = -EADDRNOTAVAIL;
1229 goto done;
1230 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001231 }
1232
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 if (test_bit(HCI_UP, &hdev->flags)) {
1234 ret = -EALREADY;
1235 goto done;
1236 }
1237
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238 if (hdev->open(hdev)) {
1239 ret = -EIO;
1240 goto done;
1241 }
1242
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001243 atomic_set(&hdev->cmd_cnt, 1);
1244 set_bit(HCI_INIT, &hdev->flags);
1245
1246 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1247 ret = hdev->setup(hdev);
1248
1249 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001250 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1251 set_bit(HCI_RAW, &hdev->flags);
1252
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001253 if (!test_bit(HCI_RAW, &hdev->flags) &&
1254 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001255 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 }
1257
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001258 clear_bit(HCI_INIT, &hdev->flags);
1259
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 if (!ret) {
1261 hci_dev_hold(hdev);
1262 set_bit(HCI_UP, &hdev->flags);
1263 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001264 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001265 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001266 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001267 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001268 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001269 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001270 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001271 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001273 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001274 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001275 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276
1277 skb_queue_purge(&hdev->cmd_q);
1278 skb_queue_purge(&hdev->rx_q);
1279
1280 if (hdev->flush)
1281 hdev->flush(hdev);
1282
1283 if (hdev->sent_cmd) {
1284 kfree_skb(hdev->sent_cmd);
1285 hdev->sent_cmd = NULL;
1286 }
1287
1288 hdev->close(hdev);
1289 hdev->flags = 0;
1290 }
1291
1292done:
1293 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 return ret;
1295}
1296
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001297/* ---- HCI ioctl helpers ---- */
1298
1299int hci_dev_open(__u16 dev)
1300{
1301 struct hci_dev *hdev;
1302 int err;
1303
1304 hdev = hci_dev_get(dev);
1305 if (!hdev)
1306 return -ENODEV;
1307
Johan Hedberge1d08f42013-10-01 22:44:50 +03001308 /* We need to ensure that no other power on/off work is pending
1309 * before proceeding to call hci_dev_do_open. This is
1310 * particularly important if the setup procedure has not yet
1311 * completed.
1312 */
1313 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1314 cancel_delayed_work(&hdev->power_off);
1315
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001316 /* After this call it is guaranteed that the setup procedure
1317 * has finished. This means that error conditions like RFKILL
1318 * or no valid public or static random address apply.
1319 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001320 flush_workqueue(hdev->req_workqueue);
1321
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001322 err = hci_dev_do_open(hdev);
1323
1324 hci_dev_put(hdev);
1325
1326 return err;
1327}
1328
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329static int hci_dev_do_close(struct hci_dev *hdev)
1330{
1331 BT_DBG("%s %p", hdev->name, hdev);
1332
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001333 cancel_delayed_work(&hdev->power_off);
1334
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 hci_req_cancel(hdev, ENODEV);
1336 hci_req_lock(hdev);
1337
1338 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001339 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 hci_req_unlock(hdev);
1341 return 0;
1342 }
1343
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001344 /* Flush RX and TX works */
1345 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001346 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001348 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001349 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001350 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001351 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001352 }
1353
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001354 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001355 cancel_delayed_work(&hdev->service_cache);
1356
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001357 cancel_delayed_work_sync(&hdev->le_scan_disable);
1358
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001359 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001360 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001362 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363
1364 hci_notify(hdev, HCI_DEV_DOWN);
1365
1366 if (hdev->flush)
1367 hdev->flush(hdev);
1368
1369 /* Reset device */
1370 skb_queue_purge(&hdev->cmd_q);
1371 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001372 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001373 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001375 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 clear_bit(HCI_INIT, &hdev->flags);
1377 }
1378
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001379 /* flush cmd work */
1380 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
1382 /* Drop queues */
1383 skb_queue_purge(&hdev->rx_q);
1384 skb_queue_purge(&hdev->cmd_q);
1385 skb_queue_purge(&hdev->raw_q);
1386
1387 /* Drop last sent command */
1388 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001389 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 kfree_skb(hdev->sent_cmd);
1391 hdev->sent_cmd = NULL;
1392 }
1393
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001394 kfree_skb(hdev->recv_evt);
1395 hdev->recv_evt = NULL;
1396
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 /* After this point our queues are empty
1398 * and no tasks are scheduled. */
1399 hdev->close(hdev);
1400
Johan Hedberg35b973c2013-03-15 17:06:59 -05001401 /* Clear flags */
1402 hdev->flags = 0;
1403 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1404
Marcel Holtmann93c311a2013-10-07 00:58:33 -07001405 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1406 if (hdev->dev_type == HCI_BREDR) {
1407 hci_dev_lock(hdev);
1408 mgmt_powered(hdev, 0);
1409 hci_dev_unlock(hdev);
1410 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001411 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001412
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001413 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001414 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001415
Johan Hedberge59fda82012-02-22 18:11:53 +02001416 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001417 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001418
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 hci_req_unlock(hdev);
1420
1421 hci_dev_put(hdev);
1422 return 0;
1423}
1424
1425int hci_dev_close(__u16 dev)
1426{
1427 struct hci_dev *hdev;
1428 int err;
1429
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001430 hdev = hci_dev_get(dev);
1431 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001433
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001434 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1435 err = -EBUSY;
1436 goto done;
1437 }
1438
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001439 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1440 cancel_delayed_work(&hdev->power_off);
1441
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001443
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001444done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 hci_dev_put(hdev);
1446 return err;
1447}
1448
1449int hci_dev_reset(__u16 dev)
1450{
1451 struct hci_dev *hdev;
1452 int ret = 0;
1453
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001454 hdev = hci_dev_get(dev);
1455 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 return -ENODEV;
1457
1458 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459
Marcel Holtmann808a0492013-08-26 20:57:58 -07001460 if (!test_bit(HCI_UP, &hdev->flags)) {
1461 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001463 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001465 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1466 ret = -EBUSY;
1467 goto done;
1468 }
1469
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 /* Drop queues */
1471 skb_queue_purge(&hdev->rx_q);
1472 skb_queue_purge(&hdev->cmd_q);
1473
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001474 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001475 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001477 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
1479 if (hdev->flush)
1480 hdev->flush(hdev);
1481
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001482 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001483 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
1485 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001486 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487
1488done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 hci_req_unlock(hdev);
1490 hci_dev_put(hdev);
1491 return ret;
1492}
1493
1494int hci_dev_reset_stat(__u16 dev)
1495{
1496 struct hci_dev *hdev;
1497 int ret = 0;
1498
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001499 hdev = hci_dev_get(dev);
1500 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 return -ENODEV;
1502
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001503 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1504 ret = -EBUSY;
1505 goto done;
1506 }
1507
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1509
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001510done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 return ret;
1513}
1514
/* Handle the HCISET* device-control ioctls issued on HCI sockets.
 *
 * Copies a struct hci_dev_req from userspace, resolves the target
 * device and either runs the matching synchronous HCI request or
 * updates the in-kernel settings directly.  Returns 0 or a negative
 * errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A device opened as a user channel is controlled exclusively
	 * from userspace; refuse kernel-side configuration. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* All of these settings require an enabled BR/EDR transport. */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs packet count and MTU into the two
		 * 16-bit halves of the 32-bit value; which half is
		 * which depends on host byte order (legacy ABI). */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed layout as HCISETACLMTU. */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1600
/* Handle the HCIGETDEVLIST ioctl: report the id and flags of up to
 * dev_num registered controllers back to userspace.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the temporary buffer stays bounded. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing the devices counts as userspace interest:
		 * abort a pending automatic power-off. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1647
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * requested device and copy it back to userspace.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as userspace interest: abort a
	 * pending automatic power-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; bits 4-5: device type. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings through
	 * the ACL fields and have no SCO transport. */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1696
1697/* ---- Interface to HCI drivers ---- */
1698
/* rfkill "set block" callback: record the rfkill state in the device
 * flags and close the controller when it becomes blocked.
 * Returns 0, or -EBUSY for user-channel devices.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	/* User-channel devices are under exclusive userspace control. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		/* During setup the close is deferred; hci_power_on()
		 * re-checks HCI_RFKILLED once setup completes. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}
1718
/* rfkill integration: only the block/unblock callback is needed. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1722
/* Worker for hdev->power_on: bring the controller up and apply the
 * post-setup power policy.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-powered devices are shut down again unless
		 * userspace shows interest before the timeout. */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1754
/* Worker for hdev->power_off: powers the controller down, either on
 * explicit request or when the auto-off timeout expires.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1764
/* Delayed worker ending a time-limited discoverable period: turn
 * inquiry scan off again (page scan stays enabled) and reset the
 * discoverable timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1782
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001783int hci_uuids_clear(struct hci_dev *hdev)
1784{
Johan Hedberg48210022013-01-27 00:31:28 +02001785 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001786
Johan Hedberg48210022013-01-27 00:31:28 +02001787 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1788 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001789 kfree(uuid);
1790 }
1791
1792 return 0;
1793}
1794
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001795int hci_link_keys_clear(struct hci_dev *hdev)
1796{
1797 struct list_head *p, *n;
1798
1799 list_for_each_safe(p, n, &hdev->link_keys) {
1800 struct link_key *key;
1801
1802 key = list_entry(p, struct link_key, list);
1803
1804 list_del(p);
1805 kfree(key);
1806 }
1807
1808 return 0;
1809}
1810
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001811int hci_smp_ltks_clear(struct hci_dev *hdev)
1812{
1813 struct smp_ltk *k, *tmp;
1814
1815 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1816 list_del(&k->list);
1817 kfree(k);
1818 }
1819
1820 return 0;
1821}
1822
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001823struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1824{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001825 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001826
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001827 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001828 if (bacmp(bdaddr, &k->bdaddr) == 0)
1829 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001830
1831 return NULL;
1832}
1833
/* Decide whether a newly created link key should be stored
 * persistently.  Returns true when the key may be kept across
 * connections, false when it must be discarded once the connection
 * ends.  @conn may be NULL (security mode 3 pairing, no connection
 * context).  The rules are evaluated in order.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one
	 * (old_key_type == 0xff marks "no previous key") */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1869
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001870struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001871{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001872 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001873
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001874 list_for_each_entry(k, &hdev->long_term_keys, list) {
1875 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001876 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001877 continue;
1878
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001879 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001880 }
1881
1882 return NULL;
1883}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001884
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001885struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001886 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001887{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001888 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001889
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001890 list_for_each_entry(k, &hdev->long_term_keys, list)
1891 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001892 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001893 return k;
1894
1895 return NULL;
1896}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001897
/* Create or update the stored link key for @bdaddr and, for new keys,
 * notify the management interface.  @conn may be NULL (security
 * mode 3 pairing).  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops. */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1950
/* Store (or update) an SMP key for @bdaddr.  Only STK and LTK types
 * are accepted; any other type is silently ignored.  Returns 0 or
 * -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address, if any. */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only LTKs (not STKs) are reported to the management interface. */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1987
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001988int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1989{
1990 struct link_key *key;
1991
1992 key = hci_find_link_key(hdev, bdaddr);
1993 if (!key)
1994 return -ENOENT;
1995
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001996 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001997
1998 list_del(&key->list);
1999 kfree(key);
2000
2001 return 0;
2002}
2003
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002004int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2005{
2006 struct smp_ltk *k, *tmp;
2007
2008 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2009 if (bacmp(bdaddr, &k->bdaddr))
2010 continue;
2011
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002012 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002013
2014 list_del(&k->list);
2015 kfree(k);
2016 }
2017
2018 return 0;
2019}
2020
/* HCI command timer function: fires when the controller fails to
 * answer a command in time.  Logs the offending opcode (if the sent
 * command is still around) and unblocks the command queue so further
 * commands can be transmitted.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Release the command credit and kick the command worker. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2038
Szymon Janc2763eda2011-03-22 13:12:22 +01002039struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002040 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002041{
2042 struct oob_data *data;
2043
2044 list_for_each_entry(data, &hdev->remote_oob_data, list)
2045 if (bacmp(bdaddr, &data->bdaddr) == 0)
2046 return data;
2047
2048 return NULL;
2049}
2050
2051int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2052{
2053 struct oob_data *data;
2054
2055 data = hci_find_remote_oob_data(hdev, bdaddr);
2056 if (!data)
2057 return -ENOENT;
2058
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002059 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002060
2061 list_del(&data->list);
2062 kfree(data);
2063
2064 return 0;
2065}
2066
2067int hci_remote_oob_data_clear(struct hci_dev *hdev)
2068{
2069 struct oob_data *data, *n;
2070
2071 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2072 list_del(&data->list);
2073 kfree(data);
2074 }
2075
2076 return 0;
2077}
2078
/* Store (or refresh) the out-of-band hash and randomizer received for
 * @bdaddr.  Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	/* No entry yet for this address: allocate and link one. */
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2102
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002103struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002104{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002105 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002106
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002107 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002108 if (bacmp(bdaddr, &b->bdaddr) == 0)
2109 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002110
2111 return NULL;
2112}
2113
2114int hci_blacklist_clear(struct hci_dev *hdev)
2115{
2116 struct list_head *p, *n;
2117
2118 list_for_each_safe(p, n, &hdev->blacklist) {
2119 struct bdaddr_list *b;
2120
2121 b = list_entry(p, struct bdaddr_list, list);
2122
2123 list_del(p);
2124 kfree(b);
2125 }
2126
2127 return 0;
2128}
2129
/* Add @bdaddr to the device's reject list and notify the management
 * interface.  Returns the mgmt result, -EBADF for BDADDR_ANY,
 * -EEXIST if already listed, or -ENOMEM.
 *
 * NOTE(review): @type is only forwarded to mgmt; the list entry
 * itself is keyed on the address alone (see hci_blacklist_lookup).
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blacklisted. */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
2150
/* Remove @bdaddr from the reject list and notify the management
 * interface.  Passing BDADDR_ANY clears the whole list instead.
 * Returns the mgmt result, 0 (for a full clear), or -ENOENT.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* BDADDR_ANY acts as a "remove everything" wildcard. */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
2167
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002168static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002169{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002170 if (status) {
2171 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002172
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002173 hci_dev_lock(hdev);
2174 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2175 hci_dev_unlock(hdev);
2176 return;
2177 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002178}
2179
/* Completion callback for the LE-scan-disable request.  Depending on
 * the discovery type, either stop the discovery here (LE-only) or
 * continue with the BR/EDR inquiry phase of an interleaved discovery.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: we are done. */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Follow up with a classic inquiry now that LE
		 * scanning is off. */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Start the inquiry with an empty result cache. */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2222
/* Delayed worker that turns LE scanning off once the scan duration
 * has elapsed; completion is handled asynchronously in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2243
/* Alloc HCI device.
 *
 * Allocates a zeroed struct hci_dev, installs conservative defaults
 * and initializes all locks, lists, work items, queues and the
 * command timer.  The driver fills in its callbacks before calling
 * hci_register_dev().  Returns the new device or NULL on allocation
 * failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Conservative defaults until the controller's capabilities
	 * are read during the init sequence. */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Default sniff interval bounds. */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Deferred work for RX/TX/command processing and the
	 * power-on/off and discovery timeout policies. */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2297
2298/* Free HCI device */
2299void hci_free_dev(struct hci_dev *hdev)
2300{
David Herrmann9be0dab2012-04-22 14:39:57 +02002301 /* will free via device release */
2302 put_device(&hdev->dev);
2303}
2304EXPORT_SYMBOL(hci_free_dev);
2305
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306/* Register HCI device */
2307int hci_register_dev(struct hci_dev *hdev)
2308{
David Herrmannb1b813d2012-04-22 14:39:58 +02002309 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310
David Herrmann010666a2012-01-07 15:47:07 +01002311 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 return -EINVAL;
2313
Mat Martineau08add512011-11-02 16:18:36 -07002314 /* Do not allow HCI_AMP devices to register at index 0,
2315 * so the index can be used as the AMP controller ID.
2316 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002317 switch (hdev->dev_type) {
2318 case HCI_BREDR:
2319 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2320 break;
2321 case HCI_AMP:
2322 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2323 break;
2324 default:
2325 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002327
Sasha Levin3df92b32012-05-27 22:36:56 +02002328 if (id < 0)
2329 return id;
2330
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 sprintf(hdev->name, "hci%d", id);
2332 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002333
2334 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2335
Kees Cookd8537542013-07-03 15:04:57 -07002336 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2337 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002338 if (!hdev->workqueue) {
2339 error = -ENOMEM;
2340 goto err;
2341 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002342
Kees Cookd8537542013-07-03 15:04:57 -07002343 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2344 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002345 if (!hdev->req_workqueue) {
2346 destroy_workqueue(hdev->workqueue);
2347 error = -ENOMEM;
2348 goto err;
2349 }
2350
David Herrmann33ca9542011-10-08 14:58:49 +02002351 error = hci_add_sysfs(hdev);
2352 if (error < 0)
2353 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002355 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002356 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2357 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002358 if (hdev->rfkill) {
2359 if (rfkill_register(hdev->rfkill) < 0) {
2360 rfkill_destroy(hdev->rfkill);
2361 hdev->rfkill = NULL;
2362 }
2363 }
2364
Johan Hedberg5e130362013-09-13 08:58:17 +03002365 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2366 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2367
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002368 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002369 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002370
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002371 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002372 /* Assume BR/EDR support until proven otherwise (such as
2373 * through reading supported features during init.
2374 */
2375 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2376 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002377
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002378 write_lock(&hci_dev_list_lock);
2379 list_add(&hdev->list, &hci_dev_list);
2380 write_unlock(&hci_dev_list_lock);
2381
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002383 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384
Johan Hedberg19202572013-01-14 22:33:51 +02002385 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002386
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002388
David Herrmann33ca9542011-10-08 14:58:49 +02002389err_wqueue:
2390 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002391 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002392err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002393 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002394
David Herrmann33ca9542011-10-08 14:58:49 +02002395 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396}
2397EXPORT_SYMBOL(hci_register_dev);
2398
/* Unregister HCI device */
/*
 * Tear down everything hci_register_dev() set up: unlink the device
 * from the global list, close it, flush pending work, drop stored
 * keys/state and finally release the index and the last reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Fence off further mgmt/ioctl activity during teardown */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled driver packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Tell mgmt the index is gone, unless mgmt never saw the device
	 * (still in SETUP) or we are in the middle of init.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge persistent state stored for this controller */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	/* May free hdev - do not touch it afterwards */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2457
2458/* Suspend HCI device */
2459int hci_suspend_dev(struct hci_dev *hdev)
2460{
2461 hci_notify(hdev, HCI_DEV_SUSPEND);
2462 return 0;
2463}
2464EXPORT_SYMBOL(hci_suspend_dev);
2465
2466/* Resume HCI device */
2467int hci_resume_dev(struct hci_dev *hdev)
2468{
2469 hci_notify(hdev, HCI_DEV_RESUME);
2470 return 0;
2471}
2472EXPORT_SYMBOL(hci_resume_dev);
2473
Marcel Holtmann76bca882009-11-18 00:40:39 +01002474/* Receive frame from HCI drivers */
2475int hci_recv_frame(struct sk_buff *skb)
2476{
2477 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2478 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002479 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002480 kfree_skb(skb);
2481 return -ENXIO;
2482 }
2483
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002484 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002485 bt_cb(skb)->incoming = 1;
2486
2487 /* Time stamp */
2488 __net_timestamp(skb);
2489
Marcel Holtmann76bca882009-11-18 00:40:39 +01002490 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002491 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002492
Marcel Holtmann76bca882009-11-18 00:40:39 +01002493 return 0;
2494}
2495EXPORT_SYMBOL(hci_recv_frame);
2496
/*
 * Incrementally reassemble one HCI packet of @type from a driver byte
 * stream into hdev->reassembly[index].
 *
 * Bytes from @data are copied into the partial skb header-first: once
 * the packet header is complete, the expected payload length is read
 * from it.  A completed packet is handed to hci_recv_frame() and the
 * reassembly slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno: -EILSEQ for a bad type/index, -ENOMEM on allocation failure or
 * when the advertised payload would overflow the preallocated buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Only ACL, SCO and event packets can be reassembled */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No partial packet yet: allocate one sized for the worst
		 * case of this packet type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If the header just completed, learn the payload length
		 * from it and sanity-check it against the buffer room.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: pass it up and free the slot */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2605
Marcel Holtmannef222012007-07-11 06:42:04 +02002606int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2607{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302608 int rem = 0;
2609
Marcel Holtmannef222012007-07-11 06:42:04 +02002610 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2611 return -EILSEQ;
2612
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002613 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002614 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302615 if (rem < 0)
2616 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002617
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302618 data += (count - rem);
2619 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002620 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002621
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302622 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002623}
2624EXPORT_SYMBOL(hci_recv_fragment);
2625
Suraj Sumangala99811512010-07-14 13:02:19 +05302626#define STREAM_REASSEMBLY 0
2627
2628int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2629{
2630 int type;
2631 int rem = 0;
2632
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002633 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302634 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2635
2636 if (!skb) {
2637 struct { char type; } *pkt;
2638
2639 /* Start of the frame */
2640 pkt = data;
2641 type = pkt->type;
2642
2643 data++;
2644 count--;
2645 } else
2646 type = bt_cb(skb)->pkt_type;
2647
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002648 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002649 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302650 if (rem < 0)
2651 return rem;
2652
2653 data += (count - rem);
2654 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002655 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302656
2657 return rem;
2658}
2659EXPORT_SYMBOL(hci_recv_stream_fragment);
2660
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661/* ---- Interface to upper protocols ---- */
2662
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663int hci_register_cb(struct hci_cb *cb)
2664{
2665 BT_DBG("%p name %s", cb, cb->name);
2666
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002667 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002669 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670
2671 return 0;
2672}
2673EXPORT_SYMBOL(hci_register_cb);
2674
2675int hci_unregister_cb(struct hci_cb *cb)
2676{
2677 BT_DBG("%p name %s", cb, cb->name);
2678
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002679 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002681 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682
2683 return 0;
2684}
2685EXPORT_SYMBOL(hci_unregister_cb);
2686
2687static int hci_send_frame(struct sk_buff *skb)
2688{
2689 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2690
2691 if (!hdev) {
2692 kfree_skb(skb);
2693 return -ENODEV;
2694 }
2695
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002696 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002698 /* Time stamp */
2699 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002701 /* Send copy to monitor */
2702 hci_send_to_monitor(hdev, skb);
2703
2704 if (atomic_read(&hdev->promisc)) {
2705 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002706 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 }
2708
2709 /* Get rid of skb owner, prior to sending to the driver. */
2710 skb_orphan(skb);
2711
2712 return hdev->send(skb);
2713}
2714
Johan Hedberg3119ae92013-03-05 20:37:44 +02002715void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2716{
2717 skb_queue_head_init(&req->cmd_q);
2718 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002719 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002720}
2721
2722int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2723{
2724 struct hci_dev *hdev = req->hdev;
2725 struct sk_buff *skb;
2726 unsigned long flags;
2727
2728 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2729
Andre Guedes5d73e032013-03-08 11:20:16 -03002730 /* If an error occured during request building, remove all HCI
2731 * commands queued on the HCI request queue.
2732 */
2733 if (req->err) {
2734 skb_queue_purge(&req->cmd_q);
2735 return req->err;
2736 }
2737
Johan Hedberg3119ae92013-03-05 20:37:44 +02002738 /* Do not allow empty requests */
2739 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002740 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002741
2742 skb = skb_peek_tail(&req->cmd_q);
2743 bt_cb(skb)->req.complete = complete;
2744
2745 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2746 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2747 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2748
2749 queue_work(hdev->workqueue, &hdev->cmd_work);
2750
2751 return 0;
2752}
2753
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002754static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002755 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756{
2757 int len = HCI_COMMAND_HDR_SIZE + plen;
2758 struct hci_command_hdr *hdr;
2759 struct sk_buff *skb;
2760
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002762 if (!skb)
2763 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
2765 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002766 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 hdr->plen = plen;
2768
2769 if (plen)
2770 memcpy(skb_put(skb, plen), param, plen);
2771
2772 BT_DBG("skb len %d", skb->len);
2773
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002774 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002776
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002777 return skb;
2778}
2779
2780/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002781int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2782 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002783{
2784 struct sk_buff *skb;
2785
2786 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2787
2788 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2789 if (!skb) {
2790 BT_ERR("%s no memory for command", hdev->name);
2791 return -ENOMEM;
2792 }
2793
Johan Hedberg11714b32013-03-05 20:37:47 +02002794 /* Stand-alone HCI commands must be flaged as
2795 * single-command requests.
2796 */
2797 bt_cb(skb)->req.start = true;
2798
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002800 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801
2802 return 0;
2803}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
Johan Hedberg71c76a12013-03-05 20:37:46 +02002805/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002806void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2807 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002808{
2809 struct hci_dev *hdev = req->hdev;
2810 struct sk_buff *skb;
2811
2812 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2813
Andre Guedes34739c12013-03-08 11:20:18 -03002814 /* If an error occured during request building, there is no point in
2815 * queueing the HCI command. We can simply return.
2816 */
2817 if (req->err)
2818 return;
2819
Johan Hedberg71c76a12013-03-05 20:37:46 +02002820 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2821 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002822 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2823 hdev->name, opcode);
2824 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002825 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002826 }
2827
2828 if (skb_queue_empty(&req->cmd_q))
2829 bt_cb(skb)->req.start = true;
2830
Johan Hedberg02350a72013-04-03 21:50:29 +03002831 bt_cb(skb)->req.event = event;
2832
Johan Hedberg71c76a12013-03-05 20:37:46 +02002833 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002834}
2835
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002836void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2837 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002838{
2839 hci_req_add_ev(req, opcode, plen, param, 0);
2840}
2841
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002843void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844{
2845 struct hci_command_hdr *hdr;
2846
2847 if (!hdev->sent_cmd)
2848 return NULL;
2849
2850 hdr = (void *) hdev->sent_cmd->data;
2851
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002852 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853 return NULL;
2854
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002855 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856
2857 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2858}
2859
2860/* Send ACL data */
2861static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2862{
2863 struct hci_acl_hdr *hdr;
2864 int len = skb->len;
2865
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002866 skb_push(skb, HCI_ACL_HDR_SIZE);
2867 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002868 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002869 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2870 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871}
2872
/*
 * Attach ACL header(s) to @skb (and its frag_list fragments, if any)
 * and append everything to @queue.  The first fragment keeps the flags
 * passed in by the caller; continuations are re-flagged ACL_CONT.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Only the head data counts here; fragments sit on frag_list */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		/* AMP controllers address the logical channel handle */
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments never carry ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuations use conn->handle even
			 * on HCI_AMP, where the first fragment used
			 * chan->handle - confirm this is intended.
			 */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2931
2932void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2933{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002934 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002935
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002936 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002937
2938 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002939
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002940 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002942 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944
2945/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002946void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947{
2948 struct hci_dev *hdev = conn->hdev;
2949 struct hci_sco_hdr hdr;
2950
2951 BT_DBG("%s len %d", hdev->name, skb->len);
2952
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002953 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954 hdr.dlen = skb->len;
2955
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002956 skb_push(skb, HCI_SCO_HDR_SIZE);
2957 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002958 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959
2960 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002961 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002962
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002964 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966
2967/* ---- HCI TX task (outgoing data) ---- */
2968
/* HCI Connection scheduler */
/*
 * Pick the connection of @type with data queued and the fewest packets
 * in flight (round-robin fairness).  On success, *quote receives the
 * number of packets that connection may send now, derived from the
 * matching controller buffer count; with no candidate it is set to 0.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the least outstanding data */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Select the buffer budget matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL buffers.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Share the available buffers among the candidates */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3029
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003030static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031{
3032 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003033 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034
Ville Tervobae1f5d92011-02-10 22:38:53 -03003035 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003037 rcu_read_lock();
3038
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003040 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003041 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003042 BT_ERR("%s killing stalled connection %pMR",
3043 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003044 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 }
3046 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003047
3048 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049}
3050
/*
 * Channel-aware variant of hci_low_sent(): among all channels on
 * connections of @type, choose the one whose head packet has the
 * highest priority, breaking ties in favour of the least-sent
 * connection.  *quote receives the per-round packet budget (at least 1
 * when a channel is returned).
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* A channel's priority is that of its head packet */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority level found: restart the
			 * fairness bookkeeping at that level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Prefer the least-busy connection at this level */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Derive the packet budget from the matching buffer pool */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3132
/* Post-round starvation promotion for link @type.
 *
 * For every channel of a connected link: if it transmitted during the
 * previous scheduling round (chan->sent != 0) its per-round counter is
 * simply reset; if it sent nothing but still has data queued, its head
 * skb is promoted to priority HCI_PRIO_MAX - 1 so it will win the next
 * hci_chan_sent() selection instead of starving behind higher-priority
 * traffic.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round: just reset */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3182
/* Number of controller buffer blocks consumed by one ACL frame: the
 * payload length (frame minus the ACL header) divided by the
 * controller's block size, rounded up.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
3188
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003189static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191 if (!test_bit(HCI_RAW, &hdev->flags)) {
3192 /* ACL tx timeout must be longer than maximum
3193 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003194 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003195 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003196 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003198}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199
/* Packet-based ACL scheduler: while ACL buffer credits remain, pick the
 * best channel via hci_chan_sent() and transmit up to its quota of
 * frames, stopping early if a lower-priority frame reaches the head of
 * the channel queue.  If anything was sent this round, run the
 * starvation promotion in hci_prio_recalculate().
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit consumed; count the frame against
			 * both the channel and its connection.
			 */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3237
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003238static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003239{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003240 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003241 struct hci_chan *chan;
3242 struct sk_buff *skb;
3243 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003244 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003245
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003246 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003247
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003248 BT_DBG("%s", hdev->name);
3249
3250 if (hdev->dev_type == HCI_AMP)
3251 type = AMP_LINK;
3252 else
3253 type = ACL_LINK;
3254
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003255 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003256 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003257 u32 priority = (skb_peek(&chan->data_q))->priority;
3258 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3259 int blocks;
3260
3261 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003262 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003263
3264 /* Stop if priority has changed */
3265 if (skb->priority < priority)
3266 break;
3267
3268 skb = skb_dequeue(&chan->data_q);
3269
3270 blocks = __get_blocks(hdev, skb);
3271 if (blocks > hdev->block_cnt)
3272 return;
3273
3274 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003275 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003276
3277 hci_send_frame(skb);
3278 hdev->acl_last_tx = jiffies;
3279
3280 hdev->block_cnt -= blocks;
3281 quote -= blocks;
3282
3283 chan->sent += blocks;
3284 chan->conn->sent += blocks;
3285 }
3286 }
3287
3288 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003289 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003290}
3291
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003292static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003293{
3294 BT_DBG("%s", hdev->name);
3295
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003296 /* No ACL link over BR/EDR controller */
3297 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3298 return;
3299
3300 /* No AMP link over AMP controller */
3301 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003302 return;
3303
3304 switch (hdev->flow_ctl_mode) {
3305 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3306 hci_sched_acl_pkt(hdev);
3307 break;
3308
3309 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3310 hci_sched_acl_blk(hdev);
3311 break;
3312 }
3313}
3314
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003316static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003317{
3318 struct hci_conn *conn;
3319 struct sk_buff *skb;
3320 int quote;
3321
3322 BT_DBG("%s", hdev->name);
3323
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003324 if (!hci_conn_num(hdev, SCO_LINK))
3325 return;
3326
Linus Torvalds1da177e2005-04-16 15:20:36 -07003327 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3328 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3329 BT_DBG("skb %p len %d", skb, skb->len);
3330 hci_send_frame(skb);
3331
3332 conn->sent++;
3333 if (conn->sent == ~0)
3334 conn->sent = 0;
3335 }
3336 }
3337}
3338
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003339static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003340{
3341 struct hci_conn *conn;
3342 struct sk_buff *skb;
3343 int quote;
3344
3345 BT_DBG("%s", hdev->name);
3346
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003347 if (!hci_conn_num(hdev, ESCO_LINK))
3348 return;
3349
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003350 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3351 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003352 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3353 BT_DBG("skb %p len %d", skb, skb->len);
3354 hci_send_frame(skb);
3355
3356 conn->sent++;
3357 if (conn->sent == ~0)
3358 conn->sent = 0;
3359 }
3360 }
3361}
3362
/* LE scheduler: same shape as hci_sched_acl_pkt(), but draws credits
 * from the dedicated LE buffer pool when the controller advertises one
 * (le_pkts != 0) and from the shared ACL pool otherwise.  Also detects
 * an LE transmit stall before scheduling.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool; remember the start value so we can tell
	 * whether anything was sent this round.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool they came
	 * from.
	 */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3413
/* TX work item: run the per-link-type schedulers to push queued
 * traffic to the driver, then flush any raw (unknown type) packets.
 * In user-channel mode only the raw queue is serviced
 * (NOTE(review): user space presumably performs its own scheduling
 * through the user channel — confirm against the hci_sock code).
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
3434
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003435/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436
3437/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003438static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439{
3440 struct hci_acl_hdr *hdr = (void *) skb->data;
3441 struct hci_conn *conn;
3442 __u16 handle, flags;
3443
3444 skb_pull(skb, HCI_ACL_HDR_SIZE);
3445
3446 handle = __le16_to_cpu(hdr->handle);
3447 flags = hci_flags(handle);
3448 handle = hci_handle(handle);
3449
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003450 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003451 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452
3453 hdev->stat.acl_rx++;
3454
3455 hci_dev_lock(hdev);
3456 conn = hci_conn_hash_lookup_handle(hdev, handle);
3457 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003458
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003460 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003461
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003463 l2cap_recv_acldata(conn, skb, flags);
3464 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003466 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003467 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 }
3469
3470 kfree_skb(skb);
3471}
3472
3473/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003474static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475{
3476 struct hci_sco_hdr *hdr = (void *) skb->data;
3477 struct hci_conn *conn;
3478 __u16 handle;
3479
3480 skb_pull(skb, HCI_SCO_HDR_SIZE);
3481
3482 handle = __le16_to_cpu(hdr->handle);
3483
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003484 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485
3486 hdev->stat.sco_rx++;
3487
3488 hci_dev_lock(hdev);
3489 conn = hci_conn_hash_lookup_handle(hdev, handle);
3490 hci_dev_unlock(hdev);
3491
3492 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003494 sco_recv_scodata(conn, skb);
3495 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003497 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003498 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 }
3500
3501 kfree_skb(skb);
3502}
3503
Johan Hedberg9238f362013-03-05 20:37:48 +02003504static bool hci_req_is_complete(struct hci_dev *hdev)
3505{
3506 struct sk_buff *skb;
3507
3508 skb = skb_peek(&hdev->cmd_q);
3509 if (!skb)
3510 return true;
3511
3512 return bt_cb(skb)->req.start;
3513}
3514
Johan Hedberg42c6b122013-03-05 20:37:49 +02003515static void hci_resend_last(struct hci_dev *hdev)
3516{
3517 struct hci_command_hdr *sent;
3518 struct sk_buff *skb;
3519 u16 opcode;
3520
3521 if (!hdev->sent_cmd)
3522 return;
3523
3524 sent = (void *) hdev->sent_cmd->data;
3525 opcode = __le16_to_cpu(sent->opcode);
3526 if (opcode == HCI_OP_RESET)
3527 return;
3528
3529 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3530 if (!skb)
3531 return;
3532
3533 skb_queue_head(&hdev->cmd_q, skb);
3534 queue_work(hdev->workqueue, &hdev->cmd_work);
3535}
3536
/* Called on command complete/status for @opcode: decide whether the
 * current HCI request has finished and, if so, locate its completion
 * callback (either on hdev->sent_cmd for the final command, or on one
 * of the queued commands being discarded) and invoke it with @status.
 * All remaining queued commands of the finished request are dropped.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Start of the next request: put it back and stop */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3602
/* RX work item: drain hdev->rx_q, mirroring every frame to the monitor
 * interface and (in promiscuous mode) to the HCI sockets, then
 * dispatch by packet type to the event/ACL/SCO handlers.  Frames are
 * dropped entirely for raw-mode or user-channel devices, and data
 * packets are dropped while the device is still initializing.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
3658
/* Command work item: if the controller has a free command credit
 * (cmd_cnt), take the next queued command, keep a clone in
 * hdev->sent_cmd (needed for request completion handling and possible
 * resend) and transmit it, arming the command timeout.  If the clone
 * allocation fails the command is put back and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003690
Andre Guedes31f79562012-04-24 21:02:53 -03003691u8 bdaddr_to_le(u8 bdaddr_type)
3692{
3693 switch (bdaddr_type) {
3694 case BDADDR_LE_PUBLIC:
3695 return ADDR_LE_DEV_PUBLIC;
3696
3697 default:
3698 /* Fallback to LE Random address type */
3699 return ADDR_LE_DEV_RANDOM;
3700 }
3701}