/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

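/* Synchronous requests park the caller on hdev->req_wait_q with
 * hdev->req_status set to HCI_REQ_PEND. The two helpers below move the
 * request to HCI_REQ_DONE (normal completion) or HCI_REQ_CANCELED and
 * wake the waiter; the sync waiters then map req_result to an errno.
 */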
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

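/* Send a single HCI command and sleep until it completes. When @event is
 * non-zero the request completes on that specific event instead of the
 * usual Command Complete (useful for vendor-specific setup commands).
 * On success the returned skb holds the event parameters and must be
 * freed by the caller.
 *
 * A minimal usage sketch (error handling elided):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */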
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

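/* Same as __hci_req_sync(), but takes hci_req_lock() so that requests
 * from different contexts are serialized, and refuses to run while the
 * interface is down.
 */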
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

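/* Pick the best inquiry mode the controller supports: 0x02 for Inquiry
 * Result with RSSI or Extended Inquiry Result, 0x01 for Inquiry Result
 * with RSSI only, 0x00 for the standard Inquiry Result format. The
 * explicit manufacturer/revision checks appear to be workarounds for
 * controllers that misreport their capabilities.
 */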
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

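/* The event mask is a 64-bit bitfield, one bit per HCI event as defined
 * in the Core Specification; only events that the host cares about and
 * that match the controller's feature set are unmasked here.
 */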
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

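/* Controller bring-up runs as three synchronous request stages because
 * later commands depend on responses to earlier ones: stage 1 resets the
 * controller and reads basic capabilities, stage 2 issues the common
 * BR/EDR and LE configuration, and stage 3 sends commands that are only
 * valid once the supported-commands and feature bits are known.
 */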
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

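/* One-command request builders used by the HCISET* ioctls in
 * hci_dev_cmd(); the opt argument carries the raw value passed in from
 * user space.
 */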
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

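/* Add or refresh an inquiry-cache entry for the device in @data. Returns
 * true when the remote name is known (so the result can be reported to
 * user space immediately) and false when a remote name request is still
 * needed or the entry could not be allocated.
 */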
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

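/* Action function for wait_on_bit(): reschedule and report whether the
 * sleep was interrupted by a signal.
 */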
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

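/* Build the LE advertising data for @hdev into @ptr: a Flags field, the
 * advertising TX power (when valid) and the local name, shortened if it
 * does not fit in the remaining HCI_MAX_AD_LENGTH bytes. Returns the
 * number of bytes written.
 */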
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

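/* Queue an LE Set Advertising Data command, but only when the freshly
 * built data actually differs from what the controller already has.
 */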
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

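/* Power on a controller: call the driver's open(), run the optional
 * vendor setup() hook, execute the staged __hci_init() sequence (unless
 * the device is raw) and, on success, notify the management interface.
 * On any failure the device is torn back down to a closed state.
 */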
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

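/* Common power-off path for hci_dev_close() and device unregistration:
 * flush the RX/TX/cmd work, drain the inquiry cache and connection hash,
 * optionally reset the controller, drop all queued frames and finally
 * call the driver's close().
 */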
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

1375int hci_dev_cmd(unsigned int cmd, void __user *arg)
1376{
1377 struct hci_dev *hdev;
1378 struct hci_dev_req dr;
1379 int err = 0;
1380
1381 if (copy_from_user(&dr, arg, sizeof(dr)))
1382 return -EFAULT;
1383
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001384 hdev = hci_dev_get(dr.dev_id);
1385 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386 return -ENODEV;
1387
1388 switch (cmd) {
1389 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001390 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1391 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 break;
1393
1394 case HCISETENCRYPT:
1395 if (!lmp_encrypt_capable(hdev)) {
1396 err = -EOPNOTSUPP;
1397 break;
1398 }
1399
1400 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1401 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001402 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1403 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 if (err)
1405 break;
1406 }
1407
Johan Hedberg01178cd2013-03-05 20:37:41 +02001408 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1409 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 break;
1411
1412 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001413 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1414 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 break;
1416
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001417 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001418 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1419 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001420 break;
1421
1422 case HCISETLINKMODE:
1423 hdev->link_mode = ((__u16) dr.dev_opt) &
1424 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1425 break;
1426
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 case HCISETPTYPE:
1428 hdev->pkt_type = (__u16) dr.dev_opt;
1429 break;
1430
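	/* For the MTU ioctls below, dev_opt packs two 16-bit values:
	 * the packet count in the first __u16 and the MTU in the
	 * second. On a little-endian host that corresponds to, e.g.:
	 *
	 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
	 */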
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001432 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1433 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 break;
1435
1436 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001437 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1438 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 break;
1440
1441 default:
1442 err = -EINVAL;
1443 break;
1444 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001445
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 hci_dev_put(hdev);
1447 return err;
1448}
1449
1450int hci_get_dev_list(void __user *arg)
1451{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001452 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 struct hci_dev_list_req *dl;
1454 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 int n = 0, size, err;
1456 __u16 dev_num;
1457
1458 if (get_user(dev_num, (__u16 __user *) arg))
1459 return -EFAULT;
1460
1461 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1462 return -EINVAL;
1463
1464 size = sizeof(*dl) + dev_num * sizeof(*dr);
1465
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001466 dl = kzalloc(size, GFP_KERNEL);
1467 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 return -ENOMEM;
1469
1470 dr = dl->dev_req;
1471
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001472 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001473 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001474 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001475 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001476
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001477 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1478 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001479
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 (dr + n)->dev_id = hdev->id;
1481 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001482
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 if (++n >= dev_num)
1484 break;
1485 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001486 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487
1488 dl->dev_num = n;
1489 size = sizeof(*dl) + n * sizeof(*dr);
1490
1491 err = copy_to_user(arg, dl, size);
1492 kfree(dl);
1493
1494 return err ? -EFAULT : 0;
1495}
1496
1497int hci_get_dev_info(void __user *arg)
1498{
1499 struct hci_dev *hdev;
1500 struct hci_dev_info di;
1501 int err = 0;
1502
1503 if (copy_from_user(&di, arg, sizeof(di)))
1504 return -EFAULT;
1505
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001506 hdev = hci_dev_get(di.dev_id);
1507 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 return -ENODEV;
1509
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001510 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001511 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001512
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001513 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1514 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001515
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 strcpy(di.name, hdev->name);
1517 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001518 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 di.flags = hdev->flags;
1520 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001521 if (lmp_bredr_capable(hdev)) {
1522 di.acl_mtu = hdev->acl_mtu;
1523 di.acl_pkts = hdev->acl_pkts;
1524 di.sco_mtu = hdev->sco_mtu;
1525 di.sco_pkts = hdev->sco_pkts;
1526 } else {
1527 di.acl_mtu = hdev->le_mtu;
1528 di.acl_pkts = hdev->le_pkts;
1529 di.sco_mtu = 0;
1530 di.sco_pkts = 0;
1531 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 di.link_policy = hdev->link_policy;
1533 di.link_mode = hdev->link_mode;
1534
1535 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1536 memcpy(&di.features, &hdev->features, sizeof(di.features));
1537
1538 if (copy_to_user(arg, &di, sizeof(di)))
1539 err = -EFAULT;
1540
1541 hci_dev_put(hdev);
1542
1543 return err;
1544}
1545
1546/* ---- Interface to HCI drivers ---- */
1547
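/* rfkill hook: blocking the switch force-closes the device. Unblocking
 * is a no-op here, so the device is not reopened automatically.
 */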
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001548static int hci_rfkill_set_block(void *data, bool blocked)
1549{
1550 struct hci_dev *hdev = data;
1551
1552 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1553
1554 if (!blocked)
1555 return 0;
1556
1557 hci_dev_do_close(hdev);
1558
1559 return 0;
1560}
1561
1562static const struct rfkill_ops hci_rfkill_ops = {
1563 .set_block = hci_rfkill_set_block,
1564};
1565
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001566static void hci_power_on(struct work_struct *work)
1567{
1568 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001569 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001570
1571 BT_DBG("%s", hdev->name);
1572
Johan Hedberg96570ff2013-05-29 09:51:29 +03001573 err = hci_dev_open(hdev->id);
1574 if (err < 0) {
1575 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001576 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001577 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001578
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001579 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg19202572013-01-14 22:33:51 +02001580 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1581 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001582
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001583 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001584 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001585}
1586
1587static void hci_power_off(struct work_struct *work)
1588{
Johan Hedberg32435532011-11-07 22:16:04 +02001589 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001590 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001591
1592 BT_DBG("%s", hdev->name);
1593
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001594 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001595}
1596
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001597static void hci_discov_off(struct work_struct *work)
1598{
1599 struct hci_dev *hdev;
1600 u8 scan = SCAN_PAGE;
1601
1602 hdev = container_of(work, struct hci_dev, discov_off.work);
1603
1604 BT_DBG("%s", hdev->name);
1605
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001606 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001607
1608 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1609
1610 hdev->discov_timeout = 0;
1611
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001612 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001613}
1614
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001615int hci_uuids_clear(struct hci_dev *hdev)
1616{
Johan Hedberg48210022013-01-27 00:31:28 +02001617 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001618
Johan Hedberg48210022013-01-27 00:31:28 +02001619 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1620 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001621 kfree(uuid);
1622 }
1623
1624 return 0;
1625}
1626
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001627int hci_link_keys_clear(struct hci_dev *hdev)
1628{
1629 struct list_head *p, *n;
1630
1631 list_for_each_safe(p, n, &hdev->link_keys) {
1632 struct link_key *key;
1633
1634 key = list_entry(p, struct link_key, list);
1635
1636 list_del(p);
1637 kfree(key);
1638 }
1639
1640 return 0;
1641}
1642
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001643int hci_smp_ltks_clear(struct hci_dev *hdev)
1644{
1645 struct smp_ltk *k, *tmp;
1646
1647 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1648 list_del(&k->list);
1649 kfree(k);
1650 }
1651
1652 return 0;
1653}
1654
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001655struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1656{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001657 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001658
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001659 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001660 if (bacmp(bdaddr, &k->bdaddr) == 0)
1661 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001662
1663 return NULL;
1664}
1665
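/* Decide whether a link key should be stored across power cycles,
 * based on the key type and on the bonding requirements both sides
 * declared during pairing.
 */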
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301666static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001667 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001668{
1669 /* Legacy key */
1670 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301671 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001672
1673 /* Debug keys are insecure so don't store them persistently */
1674 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301675 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001676
1677 /* Changed combination key and there's no previous one */
1678 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301679 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001680
1681 /* Security mode 3 case */
1682 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301683 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001684
1685 /* Neither local nor remote side had no-bonding as requirement */
1686 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301687 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001688
1689 /* Local side had dedicated bonding as requirement */
1690 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301691 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001692
1693 /* Remote side had dedicated bonding as requirement */
1694 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301695 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001696
1697 /* If none of the above criteria match, then don't store the key
1698 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301699 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001700}
1701
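/* Look up an LTK for LE encryption: the controller identifies the key
 * by the EDIV/Rand pair carried in the LE Long Term Key Request event.
 */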
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001702struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001703{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001704 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001705
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001706 list_for_each_entry(k, &hdev->long_term_keys, list) {
1707 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001708 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001709 continue;
1710
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001711 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001712 }
1713
1714 return NULL;
1715}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001716
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001717struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001718 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001719{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001720 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001721
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001722 list_for_each_entry(k, &hdev->long_term_keys, list)
1723 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001724 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001725 return k;
1726
1727 return NULL;
1728}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001729
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001730int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001731 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001732{
1733 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301734 u8 old_key_type;
1735 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001736
1737 old_key = hci_find_link_key(hdev, bdaddr);
1738 if (old_key) {
1739 old_key_type = old_key->type;
1740 key = old_key;
1741 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001742 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001743 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1744 if (!key)
1745 return -ENOMEM;
1746 list_add(&key->list, &hdev->link_keys);
1747 }
1748
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001749 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001750
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001751 /* Some buggy controller combinations generate a changed
1752 * combination key for legacy pairing even when there's no
1753 * previous key */
1754 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001755 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001756 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001757 if (conn)
1758 conn->key_type = type;
1759 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001760
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001761 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001762 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001763 key->pin_len = pin_len;
1764
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001765 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001766 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001767 else
1768 key->type = type;
1769
Johan Hedberg4df378a2011-04-28 11:29:03 -07001770 if (!new_key)
1771 return 0;
1772
1773 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1774
Johan Hedberg744cf192011-11-08 20:40:14 +02001775 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001776
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301777 if (conn)
1778 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001779
1780 return 0;
1781}
1782
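/* Store or update an SMP key. STKs and LTKs share this path, but only
 * keys flagged HCI_SMP_LTK are reported to the management interface.
 */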
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001783int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001784 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001785 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001786{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001787 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001788
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001789 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1790 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001791
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001792 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1793 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001794 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001795 else {
1796 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001797 if (!key)
1798 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001799 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001800 }
1801
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001802 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001803 key->bdaddr_type = addr_type;
1804 memcpy(key->val, tk, sizeof(key->val));
1805 key->authenticated = authenticated;
1806 key->ediv = ediv;
1807 key->enc_size = enc_size;
1808 key->type = type;
1809 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001810
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001811 if (!new_key)
1812 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001813
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001814 if (type & HCI_SMP_LTK)
1815 mgmt_new_ltk(hdev, key, 1);
1816
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001817 return 0;
1818}
1819
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001820int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1821{
1822 struct link_key *key;
1823
1824 key = hci_find_link_key(hdev, bdaddr);
1825 if (!key)
1826 return -ENOENT;
1827
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001828 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001829
1830 list_del(&key->list);
1831 kfree(key);
1832
1833 return 0;
1834}
1835
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001836int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1837{
1838 struct smp_ltk *k, *tmp;
1839
1840 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1841 if (bacmp(bdaddr, &k->bdaddr))
1842 continue;
1843
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001844 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001845
1846 list_del(&k->list);
1847 kfree(k);
1848 }
1849
1850 return 0;
1851}
1852
Ville Tervo6bd32322011-02-16 16:32:41 +02001853/* HCI command timeout: the controller did not answer the last command,
 * so log it, restore the single command credit and kick the command
 * queue so that queued commands are not stalled forever. */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001854static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001855{
1856 struct hci_dev *hdev = (void *) arg;
1857
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001858 if (hdev->sent_cmd) {
1859 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1860 u16 opcode = __le16_to_cpu(sent->opcode);
1861
1862 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1863 } else {
1864 BT_ERR("%s command tx timeout", hdev->name);
1865 }
1866
Ville Tervo6bd32322011-02-16 16:32:41 +02001867 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001868 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001869}
1870
Szymon Janc2763eda2011-03-22 13:12:22 +01001871struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001872 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001873{
1874 struct oob_data *data;
1875
1876 list_for_each_entry(data, &hdev->remote_oob_data, list)
1877 if (bacmp(bdaddr, &data->bdaddr) == 0)
1878 return data;
1879
1880 return NULL;
1881}
1882
1883int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1884{
1885 struct oob_data *data;
1886
1887 data = hci_find_remote_oob_data(hdev, bdaddr);
1888 if (!data)
1889 return -ENOENT;
1890
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001891 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001892
1893 list_del(&data->list);
1894 kfree(data);
1895
1896 return 0;
1897}
1898
1899int hci_remote_oob_data_clear(struct hci_dev *hdev)
1900{
1901 struct oob_data *data, *n;
1902
1903 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1904 list_del(&data->list);
1905 kfree(data);
1906 }
1907
1908 return 0;
1909}
1910
1911int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001912 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001913{
1914 struct oob_data *data;
1915
1916 data = hci_find_remote_oob_data(hdev, bdaddr);
1917
1918 if (!data) {
1919 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1920 if (!data)
1921 return -ENOMEM;
1922
1923 bacpy(&data->bdaddr, bdaddr);
1924 list_add(&data->list, &hdev->remote_oob_data);
1925 }
1926
1927 memcpy(data->hash, hash, sizeof(data->hash));
1928 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1929
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001930 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001931
1932 return 0;
1933}
1934
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001935struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001936{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001937 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001938
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001939 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001940 if (bacmp(bdaddr, &b->bdaddr) == 0)
1941 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001942
1943 return NULL;
1944}
1945
1946int hci_blacklist_clear(struct hci_dev *hdev)
1947{
1948 struct list_head *p, *n;
1949
1950 list_for_each_safe(p, n, &hdev->blacklist) {
1951 struct bdaddr_list *b;
1952
1953 b = list_entry(p, struct bdaddr_list, list);
1954
1955 list_del(p);
1956 kfree(b);
1957 }
1958
1959 return 0;
1960}
1961
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001962int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001963{
1964 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001965
1966 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1967 return -EBADF;
1968
Antti Julku5e762442011-08-25 16:48:02 +03001969 if (hci_blacklist_lookup(hdev, bdaddr))
1970 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001971
1972 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001973 if (!entry)
1974 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001975
1976 bacpy(&entry->bdaddr, bdaddr);
1977
1978 list_add(&entry->list, &hdev->blacklist);
1979
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001980 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001981}
1982
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001983int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001984{
1985 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001986
Szymon Janc1ec918c2011-11-16 09:32:21 +01001987 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001988 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001989
1990 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001991 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001992 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001993
1994 list_del(&entry->list);
1995 kfree(entry);
1996
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001997 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001998}
1999
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002000static void inquiry_complete(struct hci_dev *hdev, u8 status)
2001{
2002 if (status) {
2003 BT_ERR("Failed to start inquiry: status %d", status);
2004
2005 hci_dev_lock(hdev);
2006 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2007 hci_dev_unlock(hdev);
2008 return;
2009 }
2010}
2011
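/* Completion handler for the LE scan disable request. For interleaved
 * discovery the BR/EDR phase is chained here: a new asynchronous
 * request carrying an HCI inquiry (GIAC, interleaved inquiry length)
 * is built and submitted with inquiry_complete as its callback.
 */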
2012static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2013{
2014 /* General inquiry access code (GIAC) */
2015 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2016 struct hci_request req;
2017 struct hci_cp_inquiry cp;
2018 int err;
2019
2020 if (status) {
2021 BT_ERR("Failed to disable LE scanning: status %d", status);
2022 return;
2023 }
2024
2025 switch (hdev->discovery.type) {
2026 case DISCOV_TYPE_LE:
2027 hci_dev_lock(hdev);
2028 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2029 hci_dev_unlock(hdev);
2030 break;
2031
2032 case DISCOV_TYPE_INTERLEAVED:
2033 hci_req_init(&req, hdev);
2034
2035 memset(&cp, 0, sizeof(cp));
2036 memcpy(&cp.lap, lap, sizeof(cp.lap));
2037 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2038 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2039
2040 hci_dev_lock(hdev);
2041
2042 hci_inquiry_cache_flush(hdev);
2043
2044 err = hci_req_run(&req, inquiry_complete);
2045 if (err) {
2046 BT_ERR("Inquiry request failed: err %d", err);
2047 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2048 }
2049
2050 hci_dev_unlock(hdev);
2051 break;
2052 }
2053}
2054
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002055static void le_scan_disable_work(struct work_struct *work)
2056{
2057 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002058 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002059 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002060 struct hci_request req;
2061 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002062
2063 BT_DBG("%s", hdev->name);
2064
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002065 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002066
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002067 memset(&cp, 0, sizeof(cp));
2068 cp.enable = LE_SCAN_DISABLE;
2069 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2070
2071 err = hci_req_run(&req, le_scan_disable_work_complete);
2072 if (err)
2073 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002074}
2075
David Herrmann9be0dab2012-04-22 14:39:57 +02002076/* Alloc HCI device */
2077struct hci_dev *hci_alloc_dev(void)
2078{
2079 struct hci_dev *hdev;
2080
2081 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2082 if (!hdev)
2083 return NULL;
2084
David Herrmannb1b813d2012-04-22 14:39:58 +02002085 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2086 hdev->esco_type = (ESCO_HV1);
2087 hdev->link_mode = (HCI_LM_ACCEPT);
2088 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002089 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2090 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002091
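	/* Default sniff interval bounds, expressed in 0.625 ms baseband
	 * slots: 800 slots = 500 ms, 80 slots = 50 ms.
	 */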
David Herrmannb1b813d2012-04-22 14:39:58 +02002092 hdev->sniff_max_interval = 800;
2093 hdev->sniff_min_interval = 80;
2094
2095 mutex_init(&hdev->lock);
2096 mutex_init(&hdev->req_lock);
2097
2098 INIT_LIST_HEAD(&hdev->mgmt_pending);
2099 INIT_LIST_HEAD(&hdev->blacklist);
2100 INIT_LIST_HEAD(&hdev->uuids);
2101 INIT_LIST_HEAD(&hdev->link_keys);
2102 INIT_LIST_HEAD(&hdev->long_term_keys);
2103 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002104 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002105
2106 INIT_WORK(&hdev->rx_work, hci_rx_work);
2107 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2108 INIT_WORK(&hdev->tx_work, hci_tx_work);
2109 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002110
David Herrmannb1b813d2012-04-22 14:39:58 +02002111 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2112 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2113 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2114
David Herrmannb1b813d2012-04-22 14:39:58 +02002115 skb_queue_head_init(&hdev->rx_q);
2116 skb_queue_head_init(&hdev->cmd_q);
2117 skb_queue_head_init(&hdev->raw_q);
2118
2119 init_waitqueue_head(&hdev->req_wait_q);
2120
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002121 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002122
David Herrmannb1b813d2012-04-22 14:39:58 +02002123 hci_init_sysfs(hdev);
2124 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002125
2126 return hdev;
2127}
2128EXPORT_SYMBOL(hci_alloc_dev);
2129
2130/* Free HCI device */
2131void hci_free_dev(struct hci_dev *hdev)
2132{
David Herrmann9be0dab2012-04-22 14:39:57 +02002133 /* will free via device release */
2134 put_device(&hdev->dev);
2135}
2136EXPORT_SYMBOL(hci_free_dev);
2137
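/* Illustrative sketch (not part of this file): a minimal transport
 * driver would typically allocate, fill in and register a device as
 * below. The foo_* callbacks are hypothetical; only ->open and ->close
 * are mandatory, as checked in hci_register_dev().
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send  = foo_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */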
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138/* Register HCI device */
2139int hci_register_dev(struct hci_dev *hdev)
2140{
David Herrmannb1b813d2012-04-22 14:39:58 +02002141 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
David Herrmann010666a2012-01-07 15:47:07 +01002143 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 return -EINVAL;
2145
Mat Martineau08add512011-11-02 16:18:36 -07002146 /* Do not allow HCI_AMP devices to register at index 0,
2147 * so the index can be used as the AMP controller ID.
2148 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002149 switch (hdev->dev_type) {
2150 case HCI_BREDR:
2151 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2152 break;
2153 case HCI_AMP:
2154 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2155 break;
2156 default:
2157 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002159
Sasha Levin3df92b32012-05-27 22:36:56 +02002160 if (id < 0)
2161 return id;
2162
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 sprintf(hdev->name, "hci%d", id);
2164 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002165
2166 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2167
Sasha Levin3df92b32012-05-27 22:36:56 +02002168 write_lock(&hci_dev_list_lock);
2169 list_add(&hdev->list, &hci_dev_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002170 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02002172 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002173 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02002174 if (!hdev->workqueue) {
2175 error = -ENOMEM;
2176 goto err;
2177 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002178
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002179 hdev->req_workqueue = alloc_workqueue(hdev->name,
2180 WQ_HIGHPRI | WQ_UNBOUND |
2181 WQ_MEM_RECLAIM, 1);
2182 if (!hdev->req_workqueue) {
2183 destroy_workqueue(hdev->workqueue);
2184 error = -ENOMEM;
2185 goto err;
2186 }
2187
David Herrmann33ca9542011-10-08 14:58:49 +02002188 error = hci_add_sysfs(hdev);
2189 if (error < 0)
2190 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002192 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002193 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2194 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002195 if (hdev->rfkill) {
2196 if (rfkill_register(hdev->rfkill) < 0) {
2197 rfkill_destroy(hdev->rfkill);
2198 hdev->rfkill = NULL;
2199 }
2200 }
2201
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002202 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002203
2204 if (hdev->dev_type != HCI_AMP)
2205 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2206
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002208 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
Johan Hedberg19202572013-01-14 22:33:51 +02002210 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002211
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002213
David Herrmann33ca9542011-10-08 14:58:49 +02002214err_wqueue:
2215 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002216 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002217err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002218 ida_simple_remove(&hci_index_ida, hdev->id);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002219 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002220 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002221 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002222
David Herrmann33ca9542011-10-08 14:58:49 +02002223 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224}
2225EXPORT_SYMBOL(hci_register_dev);
2226
2227/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002228void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229{
Sasha Levin3df92b32012-05-27 22:36:56 +02002230 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002231
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002232 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233
Johan Hovold94324962012-03-15 14:48:41 +01002234 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2235
Sasha Levin3df92b32012-05-27 22:36:56 +02002236 id = hdev->id;
2237
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002238 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002240 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241
2242 hci_dev_do_close(hdev);
2243
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302244 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002245 kfree_skb(hdev->reassembly[i]);
2246
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002247 cancel_work_sync(&hdev->power_on);
2248
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002249 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002250 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002251 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002252 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002253 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002254 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002255
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002256 /* mgmt_index_removed should take care of emptying the
2257 * pending list */
2258 BUG_ON(!list_empty(&hdev->mgmt_pending));
2259
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 hci_notify(hdev, HCI_DEV_UNREG);
2261
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002262 if (hdev->rfkill) {
2263 rfkill_unregister(hdev->rfkill);
2264 rfkill_destroy(hdev->rfkill);
2265 }
2266
David Herrmannce242972011-10-08 14:58:48 +02002267 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002268
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002269 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002270 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002271
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002272 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002273 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002274 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002275 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002276 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002277 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002278 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002279
David Herrmanndc946bd2012-01-07 15:47:24 +01002280 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002281
2282 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283}
2284EXPORT_SYMBOL(hci_unregister_dev);
2285
2286/* Suspend HCI device */
2287int hci_suspend_dev(struct hci_dev *hdev)
2288{
2289 hci_notify(hdev, HCI_DEV_SUSPEND);
2290 return 0;
2291}
2292EXPORT_SYMBOL(hci_suspend_dev);
2293
2294/* Resume HCI device */
2295int hci_resume_dev(struct hci_dev *hdev)
2296{
2297 hci_notify(hdev, HCI_DEV_RESUME);
2298 return 0;
2299}
2300EXPORT_SYMBOL(hci_resume_dev);
2301
Marcel Holtmann76bca882009-11-18 00:40:39 +01002302/* Receive frame from HCI drivers */
2303int hci_recv_frame(struct sk_buff *skb)
2304{
2305 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2306 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002307 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002308 kfree_skb(skb);
2309 return -ENXIO;
2310 }
2311
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002312 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002313 bt_cb(skb)->incoming = 1;
2314
2315 /* Time stamp */
2316 __net_timestamp(skb);
2317
Marcel Holtmann76bca882009-11-18 00:40:39 +01002318 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002319 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002320
Marcel Holtmann76bca882009-11-18 00:40:39 +01002321 return 0;
2322}
2323EXPORT_SYMBOL(hci_recv_frame);
2324
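/* Incremental reassembly for fragmenting transports. One buffer per
 * packet type is kept in hdev->reassembly[]; scb->expect counts the
 * bytes still missing, first for the header and then, once the header
 * is complete, for the payload it announces. Returns the number of
 * unconsumed input bytes (or a negative error); a completed frame is
 * handed straight to hci_recv_frame().
 */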
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302325static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002326 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302327{
2328 int len = 0;
2329 int hlen = 0;
2330 int remain = count;
2331 struct sk_buff *skb;
2332 struct bt_skb_cb *scb;
2333
2334 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002335 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302336 return -EILSEQ;
2337
2338 skb = hdev->reassembly[index];
2339
2340 if (!skb) {
2341 switch (type) {
2342 case HCI_ACLDATA_PKT:
2343 len = HCI_MAX_FRAME_SIZE;
2344 hlen = HCI_ACL_HDR_SIZE;
2345 break;
2346 case HCI_EVENT_PKT:
2347 len = HCI_MAX_EVENT_SIZE;
2348 hlen = HCI_EVENT_HDR_SIZE;
2349 break;
2350 case HCI_SCODATA_PKT:
2351 len = HCI_MAX_SCO_SIZE;
2352 hlen = HCI_SCO_HDR_SIZE;
2353 break;
2354 }
2355
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002356 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302357 if (!skb)
2358 return -ENOMEM;
2359
2360 scb = (void *) skb->cb;
2361 scb->expect = hlen;
2362 scb->pkt_type = type;
2363
2364 skb->dev = (void *) hdev;
2365 hdev->reassembly[index] = skb;
2366 }
2367
2368 while (count) {
2369 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002370 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302371
2372 memcpy(skb_put(skb, len), data, len);
2373
2374 count -= len;
2375 data += len;
2376 scb->expect -= len;
2377 remain = count;
2378
2379 switch (type) {
2380 case HCI_EVENT_PKT:
2381 if (skb->len == HCI_EVENT_HDR_SIZE) {
2382 struct hci_event_hdr *h = hci_event_hdr(skb);
2383 scb->expect = h->plen;
2384
2385 if (skb_tailroom(skb) < scb->expect) {
2386 kfree_skb(skb);
2387 hdev->reassembly[index] = NULL;
2388 return -ENOMEM;
2389 }
2390 }
2391 break;
2392
2393 case HCI_ACLDATA_PKT:
2394 if (skb->len == HCI_ACL_HDR_SIZE) {
2395 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2396 scb->expect = __le16_to_cpu(h->dlen);
2397
2398 if (skb_tailroom(skb) < scb->expect) {
2399 kfree_skb(skb);
2400 hdev->reassembly[index] = NULL;
2401 return -ENOMEM;
2402 }
2403 }
2404 break;
2405
2406 case HCI_SCODATA_PKT:
2407 if (skb->len == HCI_SCO_HDR_SIZE) {
2408 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2409 scb->expect = h->dlen;
2410
2411 if (skb_tailroom(skb) < scb->expect) {
2412 kfree_skb(skb);
2413 hdev->reassembly[index] = NULL;
2414 return -ENOMEM;
2415 }
2416 }
2417 break;
2418 }
2419
2420 if (scb->expect == 0) {
2421 /* Complete frame */
2422
2423 bt_cb(skb)->pkt_type = type;
2424 hci_recv_frame(skb);
2425
2426 hdev->reassembly[index] = NULL;
2427 return remain;
2428 }
2429 }
2430
2431 return remain;
2432}
2433
Marcel Holtmannef222012007-07-11 06:42:04 +02002434int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2435{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302436 int rem = 0;
2437
Marcel Holtmannef222012007-07-11 06:42:04 +02002438 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2439 return -EILSEQ;
2440
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002441 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002442 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302443 if (rem < 0)
2444 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002445
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302446 data += (count - rem);
2447 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002448 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002449
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302450 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002451}
2452EXPORT_SYMBOL(hci_recv_fragment);
2453
Suraj Sumangala99811512010-07-14 13:02:19 +05302454#define STREAM_REASSEMBLY 0
2455
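/* Stream variant for unframed transports such as UART line
 * disciplines: each packet starts with a one-byte packet-type
 * indicator, which selects how the bytes that follow are reassembled.
 */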
2456int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2457{
2458 int type;
2459 int rem = 0;
2460
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002461 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302462 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2463
2464 if (!skb) {
2465 struct { char type; } *pkt;
2466
2467 /* Start of the frame */
2468 pkt = data;
2469 type = pkt->type;
2470
2471 data++;
2472 count--;
2473 } else
2474 type = bt_cb(skb)->pkt_type;
2475
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002476 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002477 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302478 if (rem < 0)
2479 return rem;
2480
2481 data += (count - rem);
2482 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002483 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302484
2485 return rem;
2486}
2487EXPORT_SYMBOL(hci_recv_stream_fragment);
2488
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489/* ---- Interface to upper protocols ---- */
2490
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491int hci_register_cb(struct hci_cb *cb)
2492{
2493 BT_DBG("%p name %s", cb, cb->name);
2494
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002495 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002497 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498
2499 return 0;
2500}
2501EXPORT_SYMBOL(hci_register_cb);
2502
2503int hci_unregister_cb(struct hci_cb *cb)
2504{
2505 BT_DBG("%p name %s", cb, cb->name);
2506
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002507 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002509 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510
2511 return 0;
2512}
2513EXPORT_SYMBOL(hci_unregister_cb);
2514
2515static int hci_send_frame(struct sk_buff *skb)
2516{
2517 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2518
2519 if (!hdev) {
2520 kfree_skb(skb);
2521 return -ENODEV;
2522 }
2523
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002524 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002526 /* Time stamp */
2527 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002529 /* Send copy to monitor */
2530 hci_send_to_monitor(hdev, skb);
2531
2532 if (atomic_read(&hdev->promisc)) {
2533 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002534 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 }
2536
2537 /* Get rid of skb owner, prior to sending to the driver. */
2538 skb_orphan(skb);
2539
2540 return hdev->send(skb);
2541}
2542
Johan Hedberg3119ae92013-03-05 20:37:44 +02002543void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2544{
2545 skb_queue_head_init(&req->cmd_q);
2546 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002547 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002548}
2549
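/* Submit a batched request: the completion callback is attached to the
 * last command of the batch, and the whole batch is spliced onto the
 * device command queue under the queue lock so its commands stay
 * contiguous.
 */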
2550int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2551{
2552 struct hci_dev *hdev = req->hdev;
2553 struct sk_buff *skb;
2554 unsigned long flags;
2555
2556 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2557
Andre Guedes5d73e032013-03-08 11:20:16 -03002558	/* If an error occurred during request building, remove all HCI
2559 * commands queued on the HCI request queue.
2560 */
2561 if (req->err) {
2562 skb_queue_purge(&req->cmd_q);
2563 return req->err;
2564 }
2565
Johan Hedberg3119ae92013-03-05 20:37:44 +02002566 /* Do not allow empty requests */
2567 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002568 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002569
2570 skb = skb_peek_tail(&req->cmd_q);
2571 bt_cb(skb)->req.complete = complete;
2572
2573 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2574 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2575 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2576
2577 queue_work(hdev->workqueue, &hdev->cmd_work);
2578
2579 return 0;
2580}
2581
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002582static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002583 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584{
2585 int len = HCI_COMMAND_HDR_SIZE + plen;
2586 struct hci_command_hdr *hdr;
2587 struct sk_buff *skb;
2588
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002590 if (!skb)
2591 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592
2593 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002594 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595 hdr->plen = plen;
2596
2597 if (plen)
2598 memcpy(skb_put(skb, plen), param, plen);
2599
2600 BT_DBG("skb len %d", skb->len);
2601
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002602 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002604
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002605 return skb;
2606}
2607
2608/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002609int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2610 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002611{
2612 struct sk_buff *skb;
2613
2614 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2615
2616 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2617 if (!skb) {
2618 BT_ERR("%s no memory for command", hdev->name);
2619 return -ENOMEM;
2620 }
2621
Johan Hedberg11714b32013-03-05 20:37:47 +02002622	/* Stand-alone HCI commands must be flagged as
2623 * single-command requests.
2624 */
2625 bt_cb(skb)->req.start = true;
2626
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002628 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629
2630 return 0;
2631}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632
Johan Hedberg71c76a12013-03-05 20:37:46 +02002633/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002634void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2635 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002636{
2637 struct hci_dev *hdev = req->hdev;
2638 struct sk_buff *skb;
2639
2640 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2641
Andre Guedes34739c12013-03-08 11:20:18 -03002642	/* If an error occurred during request building, there is no point in
2643 * queueing the HCI command. We can simply return.
2644 */
2645 if (req->err)
2646 return;
2647
Johan Hedberg71c76a12013-03-05 20:37:46 +02002648 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2649 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002650 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2651 hdev->name, opcode);
2652 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002653 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002654 }
2655
2656 if (skb_queue_empty(&req->cmd_q))
2657 bt_cb(skb)->req.start = true;
2658
Johan Hedberg02350a72013-04-03 21:50:29 +03002659 bt_cb(skb)->req.event = event;
2660
Johan Hedberg71c76a12013-03-05 20:37:46 +02002661 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002662}
2663
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002664void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2665 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002666{
2667 hci_req_add_ev(req, opcode, plen, param, 0);
2668}
2669
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002671void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672{
2673 struct hci_command_hdr *hdr;
2674
2675 if (!hdev->sent_cmd)
2676 return NULL;
2677
2678 hdr = (void *) hdev->sent_cmd->data;
2679
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002680 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 return NULL;
2682
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002683 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684
2685 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2686}
2687
2688/* Send ACL data */
2689static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2690{
2691 struct hci_acl_hdr *hdr;
2692 int len = skb->len;
2693
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002694 skb_push(skb, HCI_ACL_HDR_SIZE);
2695 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002696 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002697 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2698 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699}
2700
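/* Queue one ACL PDU, honouring any fragments on the skb frag_list:
 * the head fragment keeps the caller's ACL_START flags, continuations
 * are re-flagged ACL_CONT, and everything is queued under the queue
 * lock so the scheduler cannot interleave another PDU in between.
 */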
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002701static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002702 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002704 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 struct hci_dev *hdev = conn->hdev;
2706 struct sk_buff *list;
2707
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002708 skb->len = skb_headlen(skb);
2709 skb->data_len = 0;
2710
2711 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002712
2713 switch (hdev->dev_type) {
2714 case HCI_BREDR:
2715 hci_add_acl_hdr(skb, conn->handle, flags);
2716 break;
2717 case HCI_AMP:
2718 hci_add_acl_hdr(skb, chan->handle, flags);
2719 break;
2720 default:
2721 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2722 return;
2723 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002724
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002725 list = skb_shinfo(skb)->frag_list;
2726 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727		/* Non-fragmented */
2728 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2729
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002730 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 } else {
2732 /* Fragmented */
2733 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2734
2735 skb_shinfo(skb)->frag_list = NULL;
2736
2737 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002738 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002740 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002741
2742 flags &= ~ACL_START;
2743 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 do {
2745 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002746
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002748 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002749 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750
2751 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2752
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002753 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754 } while (list);
2755
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002756 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002758}
2759
2760void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2761{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002762 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002763
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002764 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002765
2766 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002767
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002768 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002770 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772
2773/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
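/* Pick the connection of the given link type with the fewest
 * outstanding packets (lowest 'sent') among those with queued data,
 * and grant it a fair share of the free controller buffers:
 * *quote = buffers / ready connections, minimum 1.
 */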
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock the device here. Connections are always
	 * added and removed with the TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

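/* Link TX timeout: the controller has stopped returning buffer
 * credits, so forcibly disconnect every connection of this type that
 * still has unacknowledged packets in flight.
 */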
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

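/* Channel-aware counterpart of hci_low_sent(): only channels whose
 * head-of-queue skb carries the highest pending priority compete,
 * and among those the channel on the least busy connection (lowest
 * conn->sent) wins. The quote is again cnt / num, minimum 1.
 */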
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

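/* Anti-starvation pass run after a scheduling round: channels that
 * were serviced get their per-round 'sent' counter reset, while
 * channels that got nothing have their head skb promoted to
 * HCI_PRIO_MAX - 1 so they win the next round.
 */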
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

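/* Number of controller buffer blocks consumed by one ACL packet;
 * only the payload counts, not the ACL header. For example, assuming
 * block_len = 64, a 4 + 300 byte skb needs
 * DIV_ROUND_UP(300, 64) = 5 blocks.
 */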
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

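/* Packet-based ACL scheduler: while ACL credits remain, let
 * hci_chan_sent() pick the best channel and send up to its quote of
 * frames, stopping early if a lower-priority frame reaches the head
 * of the queue; recalculate priorities if anything was sent.
 */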
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

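/* Block-based variant for controllers using block flow control (the
 * AMP case in this file): credits, quotes and the per-channel
 * accounting are all measured in buffer blocks rather than whole
 * packets, via __get_blocks().
 */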
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

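/* LE scheduler, mirroring hci_sched_acl_pkt(). Controllers without a
 * dedicated LE buffer pool report le_pkts == 0; LE traffic then
 * borrows credits from, and returns them to, the ACL pool.
 */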
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
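/* Strip the ACL header, split its handle field into the connection
 * handle and the packet-boundary/broadcast flags, and hand the
 * payload to L2CAP for reassembly.
 */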
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

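/* Commands issued through the request framework are grouped into
 * requests, with bt_cb(skb)->req.start marking the first command of
 * each request. The current request is complete once the command
 * queue is empty or its head begins a new request.
 */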
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

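/* Push a clone of the most recently sent command back onto the head
 * of the command queue so it is sent again; HCI_Reset is deliberately
 * never resent this way.
 */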
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

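/* Invoked when a command completes or fails: if the whole request is
 * now finished, find its completion callback (in hdev->sent_cmd for
 * the last command of the request, otherwise while flushing the rest
 * of the request from cmd_q) and call it with the status.
 */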
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

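/* RX work: drain hdev->rx_q. Every frame is first copied to the
 * monitor socket (and to raw sockets in promiscuous mode); in raw
 * mode nothing more is done, data packets are dropped while HCI_INIT
 * is set, and everything else is dispatched by packet type.
 */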
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

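/* Command work: if the controller advertises a free command slot
 * (cmd_cnt), dequeue and send the next command, keeping a clone in
 * hdev->sent_cmd so the completion event can be matched, and arm the
 * command timeout. If cloning fails, the command is put back and the
 * work rescheduled.
 */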
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}