/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

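/* Consume hdev->recv_evt and hand it to the caller if it matches the
 * requested event (or, when event is 0, the Command Complete for the
 * given opcode). On any mismatch the skb is freed and -ENODATA returned.
 */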
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

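/* Send a single HCI command synchronously: build a one-command request,
 * run it, and sleep (interruptibly, up to 'timeout' jiffies) until
 * hci_req_sync_complete() fires. Returns the matching event skb on
 * success or an ERR_PTR on failure.
 */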
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
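
/* Illustrative use (a sketch, not taken from this file): a caller that
 * needs the controller's version information synchronously could do
 * something like:
 *
 *        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                             HCI_INIT_TIMEOUT);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 *        rp = (struct hci_rp_read_local_version *) skb->data;
 *        ...
 *        kfree_skb(skb);
 */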

/* Execute request and wait for completion. */
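/* Callers hold hci_req_lock (the hci_req_sync() wrapper below takes it,
 * and the open/close/reset paths hold it too), so only one synchronous
 * request runs per device at a time.
 */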
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

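/* Stage-one init: optionally reset the controller, then queue the basic
 * identity reads appropriate for the transport (BR/EDR vs. AMP).
 */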
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

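/* Build the Set Event Mask (and, for LE controllers, LE Set Event Mask)
 * payloads from the feature bits the controller advertised, so only
 * events the hardware can actually generate are unmasked.
 */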
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

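/* Run the three init stages in order; each stage is a synchronous
 * request, so a failure in an earlier stage aborts the later ones.
 */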
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

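/* Transition the discovery state machine, emitting mgmt_discovering()
 * events only on the edges userspace cares about (started/stopped).
 */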
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

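/* Merge a new inquiry result into the discovery cache. Returns true if
 * the entry's remote name is already known (so no name resolution is
 * needed), false otherwise.
 */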
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

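/* HCIINQUIRY ioctl handler: optionally (re)run inquiry, wait for it to
 * finish, then copy the cached results back to userspace.
 */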
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

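/* Assemble LE advertising data (flags, TX power, shortened or complete
 * local name) into 'ptr' and return the number of bytes written, at
 * most HCI_MAX_AD_LENGTH.
 */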
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

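/* Power on a controller: run the vendor setup hook and the staged HCI
 * init unless the device is marked raw, and tear everything back down
 * if init fails.
 */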
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

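/* Common power-down path: cancel pending work, flush queues, optionally
 * reset the controller, and notify the management interface.
 */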
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled.
         */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

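/* HCIDEVRESET: flush queues and connections and issue an HCI Reset
 * without taking the interface down.
 */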
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

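/* Handle the simple HCISET* ioctls: each either runs a one-command
 * synchronous request or updates a field on the hci_dev directly.
 */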
1364int hci_dev_cmd(unsigned int cmd, void __user *arg)
1365{
1366 struct hci_dev *hdev;
1367 struct hci_dev_req dr;
1368 int err = 0;
1369
1370 if (copy_from_user(&dr, arg, sizeof(dr)))
1371 return -EFAULT;
1372
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001373 hdev = hci_dev_get(dr.dev_id);
1374 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 return -ENODEV;
1376
1377 switch (cmd) {
1378 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001379 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1380 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 break;
1382
1383 case HCISETENCRYPT:
1384 if (!lmp_encrypt_capable(hdev)) {
1385 err = -EOPNOTSUPP;
1386 break;
1387 }
1388
1389 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1390 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001391 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1392 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 if (err)
1394 break;
1395 }
1396
Johan Hedberg01178cd2013-03-05 20:37:41 +02001397 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1398 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 break;
1400
1401 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001402 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1403 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 break;
1405
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001406 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001407 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1408 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001409 break;
1410
1411 case HCISETLINKMODE:
1412 hdev->link_mode = ((__u16) dr.dev_opt) &
1413 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1414 break;
1415
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 case HCISETPTYPE:
1417 hdev->pkt_type = (__u16) dr.dev_opt;
1418 break;
1419
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001421 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1422 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 break;
1424
1425 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001426 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1427 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428 break;
1429
1430 default:
1431 err = -EINVAL;
1432 break;
1433 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001434
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 hci_dev_put(hdev);
1436 return err;
1437}
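
/*
 * Illustrative sketch of how a caller packs dr.dev_opt for the two MTU
 * ioctls above (an assumption about userspace, not code from this file):
 * dev_opt carries two 16-bit values with the same memory layout that
 * hci_dev_cmd() reads back, i.e. the first __u16 is the packet count and
 * the second __u16 is the MTU.
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *	__u16 *opt = (__u16 *) &dr.dev_opt;
 *
 *	opt[0] = 8;	// hypothetical ACL packet count
 *	opt[1] = 1021;	// hypothetical ACL MTU in bytes
 *	ioctl(hci_sock, HCISETACLMTU, &dr);
 */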

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side required dedicated bonding */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side required dedicated bonding */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, don't store the key
	 * persistently */
	return false;
}
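
/*
 * Note on the magic numbers above: auth_type and remote_auth follow the
 * HCI authentication requirements encoding, where 0x00/0x01 mean "no
 * bonding" (without/with MITM protection) and 0x02/0x03 mean "dedicated
 * bonding" (without/with MITM); hence the "> 0x01" and the
 * "== 0x02 || == 0x03" checks, with 0xff used in this file for
 * "unknown/none".
 */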

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
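
/*
 * Typical call-site sketch (hedged: the real caller lives in the HCI
 * event handling code, not in this file). When the controller reports a
 * new key via a Link Key Notification event, the handler is expected to
 * hand it over roughly like so:
 *
 *	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
 *			 ev->key_type, conn->pin_length);
 */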

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
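
/*
 * The timer itself is armed elsewhere; the usual pattern (an assumption
 * about code outside this section) is a mod_timer() call when a command
 * is handed to the driver, e.g.
 *
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 *
 * On expiry we only log the stuck opcode and restore the command credit
 * so cmd_work can make progress again.
 */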

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
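
/*
 * Illustrative usage sketch (assumption: the caller serializes against
 * other hdev state changes with hdev->lock, as the mgmt Block Device /
 * Unblock Device handlers do; cp is a hypothetical command with a
 * struct mgmt_addr_info member):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
 *	hci_dev_unlock(hdev);
 */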

static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   timeout);

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
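
/*
 * Illustrative parameters (hypothetical values, assuming the
 * LE_SCAN_ACTIVE scan type constant from hci.h): interval and window
 * are in the 0.625 ms units of the LE Set Scan Parameters command,
 * while timeout is in jiffies since it ends up feeding
 * queue_delayed_work() above.
 *
 *	err = hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x0012, 0x0012,
 *			  msecs_to_jiffies(10240));
 */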

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
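
/*
 * Minimal driver-side sketch of the alloc/register lifecycle (the
 * my_open/my_close/my_send callbacks are hypothetical; only the fields
 * checked by hci_register_dev() and used by hci_send_frame() below are
 * shown):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;		// must consume the skb
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */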

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
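
/*
 * Driver-side sketch for handing a complete packet to the core (an
 * illustration following the same conventions hci_reassembly() below
 * uses: skb->dev must point at the hci_dev and bt_cb(skb)->pkt_type
 * must be set before calling in):
 *
 *	struct sk_buff *skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, count), buf, count);
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 */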

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
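
/*
 * A note on the slot index used above: with HCI_ACLDATA_PKT == 0x02
 * through HCI_EVENT_PKT == 0x04, "type - 1" selects reassembly slots
 * 1..3, leaving slot 0 (STREAM_REASSEMBLY below) free for the stream
 * variant.
 */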

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
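
/*
 * The stream variant serves H4-style byte streams where each frame is
 * prefixed with its one-byte packet type indicator: the indicator byte
 * is peeled off here, and everything after it goes through the same
 * hci_reassembly() state machine as above.
 */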

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
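
/*
 * Sketch of the intended calling pattern (the opcode and the completion
 * callback are hypothetical; hci_req_init(), hci_req_add() and
 * hci_req_run() are all defined in this file):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_req_complete);
 */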

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
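
/*
 * For reference, hci_handle_pack() folds the packet-boundary/broadcast
 * flag bits into the top nibble of the 12-bit connection handle,
 * roughly (handle & 0x0fff) | (flags << 12), before the result is
 * stored little-endian in the ACL header above.
 */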
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
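/* Typical caller: the L2CAP core hands a fully built (possibly
 * fragmented) skb to the channel it belongs to.  A minimal sketch,
 * assuming an already established struct hci_chan *hchan:
 *
 *	skb->priority = HCI_PRIO_MAX - 1;
 *	hci_send_acl(hchan, skb, ACL_START);
 *
 * Note that the frame is only queued here; hci_tx_work() performs the
 * actual transmission subject to controller buffer accounting.
 */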
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
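/* A minimal usage sketch, assuming a connected SCO hci_conn *conn as
 * managed by sco.c.  The caller is expected to bound the payload by
 * hdev->sco_mtu, since unlike the ACL path there is no fragmentation
 * stage here:
 *
 *	if (skb->len <= hdev->sco_mtu)
 *		hci_send_sco(conn, skb);
 */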
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
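/* The quota computed above is a simple fair share.  For example, with
 * hdev->acl_cnt == 8 free controller buffers and num == 3 eligible ACL
 * connections, q = 8 / 3 = 2, so the least-recently-served connection
 * may send two packets this round; a zero share is rounded up to one
 * so a busy link can never be starved completely.
 */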
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
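/* hci_chan_sent() implements strict priority with fairness inside a
 * priority level.  For example, if channel A's head skb has priority 6
 * while channels B and C sit at priority 5, only A is considered
 * (num == 1); once A drains, B and C compete, and the channel whose
 * connection has the smaller ->sent count wins.  Effective priorities
 * range from 0 to HCI_PRIO_MAX - 1.
 */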
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
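/* For example, with the 4-byte ACL header (HCI_ACL_HDR_SIZE), a
 * 1021-byte frame on a controller reporting block_len == 339 occupies
 * DIV_ROUND_UP(1021 - 4, 339) == 3 data blocks.
 */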
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
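/* Which scheduler runs depends on how the controller reports credit:
 * packet-based flow control counts whole ACL packets (Read Buffer
 * Size), while block-based flow control, used by AMP controllers,
 * counts fixed-size data blocks (Read Data Block Size).  A frame
 * spanning three blocks therefore consumes three credits in block mode
 * but only one in packet mode.
 */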
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
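/* This mirrors the packing done by hci_add_acl_hdr(): a received
 * handle field of 0x1042, for instance, splits into flags == 0x1
 * (ACL_CONT, a continuation fragment) and connection handle == 0x042,
 * assuming the usual hci_flags(h) == (h >> 12) and
 * hci_handle(h) == (h & 0x0fff).
 */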
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
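/* A sketch of how this plays out for a three-command request built via
 * hci_req_run(), assuming all commands succeed: the completion events
 * for commands 1 and 2 return early above because the next queued skb
 * does not have req.start set, so the request is not yet complete;
 * only the event for command 3 finds the request complete and invokes
 * the req.complete callback with the final status.
 */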
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
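/* The LAP above is the General Inquiry Access Code 0x9e8b33 stored in
 * little-endian byte order, and length is the inquiry duration in
 * units of 1.28 s; for example, length == 0x08 gives a scan of roughly
 * 10.24 seconds.
 */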
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}
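/* A small translation helper: management interface callers work with
 * BDADDR_* address types, while the LE connection code expects
 * ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM.  For instance, a caller
 * such as the mgmt pair_device path would convert with
 * bdaddr_to_le(cp->addr.type) before creating an LE connection.  Note
 * that any value other than BDADDR_LE_PUBLIC, including BDADDR_BREDR,
 * maps to the random type here.
 */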