/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
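
/* Usage sketch (illustrative, not code from this file): a context that
 * may sleep can issue a single command and block for its Command
 * Complete event, using the opcode and timeout constants already used
 * elsewhere in this file:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data as struct hci_rp_read_local_version ...
 *	kfree_skb(skb);
 *
 * The error handling shown is illustrative only.
 */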

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
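
/* Usage sketch: the ioctl handlers further down pair one of the small
 * request builders with hci_req_sync(), e.g. enabling page and inquiry
 * scan (SCAN_* constants assumed from hci.h, as used by HCISETSCAN):
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */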

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
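
/* Note: the manufacturer/hci_rev/lmp_subver tuples above appear to
 * whitelist controllers that handle Inquiry Result with RSSI (mode 0x01)
 * without advertising it in their feature mask; the vendor identities
 * behind the numeric IDs are not spelled out here.
 */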

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
	 * BR/EDR/LE controllers. AMP controllers only need the first
	 * stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
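
/* In short: stage 1 resets the controller and reads its basic identity
 * and buffer layout, stage 2 configures common BR/EDR and LE behaviour
 * (event masks, SSP, inquiry mode) and stage 3 applies settings that
 * depend on what the earlier stages discovered (link policy, LE host
 * support, advertising data).
 */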

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
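
/* The discovery cache keeps every result on the "all" list and
 * additionally threads entries through the "unknown" list (remote name
 * not yet known) or the "resolve" list (name resolution pending), which
 * the lookup helpers below search.
 */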

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
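
/* Each element emitted above follows the EIR/AD TLV layout: ptr[0] is
 * the length of what follows (type byte plus payload), ptr[1] is the
 * type (e.g. EIR_FLAGS) and the rest is the payload, so a single flags
 * byte is encoded as { 0x02, EIR_FLAGS, flags }.
 */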
1054
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001055void hci_update_ad(struct hci_request *req)
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001056{
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001057 struct hci_dev *hdev = req->hdev;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001058 struct hci_cp_le_set_adv_data cp;
1059 u8 len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001060
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001061 if (!lmp_le_capable(hdev))
1062 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001063
1064 memset(&cp, 0, sizeof(cp));
1065
1066 len = create_ad(hdev, cp.data);
1067
1068 if (hdev->adv_data_len == len &&
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001069 memcmp(cp.data, hdev->adv_data, len) == 0)
1070 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001071
1072 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1073 hdev->adv_data_len = len;
1074
1075 cp.length = len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001076
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001077 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001078}
1079
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080/* ---- HCI ioctl helpers ---- */
1081
1082int hci_dev_open(__u16 dev)
1083{
1084 struct hci_dev *hdev;
1085 int ret = 0;
1086
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001087 hdev = hci_dev_get(dev);
1088 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 return -ENODEV;
1090
1091 BT_DBG("%s %p", hdev->name, hdev);
1092
1093 hci_req_lock(hdev);
1094
Johan Hovold94324962012-03-15 14:48:41 +01001095 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1096 ret = -ENODEV;
1097 goto done;
1098 }
1099
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001100 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1101 ret = -ERFKILL;
1102 goto done;
1103 }
1104
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 if (test_bit(HCI_UP, &hdev->flags)) {
1106 ret = -EALREADY;
1107 goto done;
1108 }
1109
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110 if (hdev->open(hdev)) {
1111 ret = -EIO;
1112 goto done;
1113 }
1114
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001115 atomic_set(&hdev->cmd_cnt, 1);
1116 set_bit(HCI_INIT, &hdev->flags);
1117
1118 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1119 ret = hdev->setup(hdev);
1120
1121 if (!ret) {
1122 /* Treat all non BR/EDR controllers as raw devices if
1123 * enable_hs is not set.
1124 */
1125 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1126 set_bit(HCI_RAW, &hdev->flags);
1127
1128 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1129 set_bit(HCI_RAW, &hdev->flags);
1130
1131 if (!test_bit(HCI_RAW, &hdev->flags))
1132 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 }
1134
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001135 clear_bit(HCI_INIT, &hdev->flags);
1136
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137 if (!ret) {
1138 hci_dev_hold(hdev);
1139 set_bit(HCI_UP, &hdev->flags);
1140 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001141 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1142 mgmt_valid_hdev(hdev)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001143 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001144 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001145 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001146 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001147 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001149 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001150 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001151 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152
1153 skb_queue_purge(&hdev->cmd_q);
1154 skb_queue_purge(&hdev->rx_q);
1155
1156 if (hdev->flush)
1157 hdev->flush(hdev);
1158
1159 if (hdev->sent_cmd) {
1160 kfree_skb(hdev->sent_cmd);
1161 hdev->sent_cmd = NULL;
1162 }
1163
1164 hdev->close(hdev);
1165 hdev->flags = 0;
1166 }
1167
1168done:
1169 hci_req_unlock(hdev);
1170 hci_dev_put(hdev);
1171 return ret;
1172}
1173
1174static int hci_dev_do_close(struct hci_dev *hdev)
1175{
1176 BT_DBG("%s %p", hdev->name, hdev);
1177
Andre Guedes28b75a82012-02-03 17:48:00 -03001178 cancel_work_sync(&hdev->le_scan);
1179
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001180 cancel_delayed_work(&hdev->power_off);
1181
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 hci_req_cancel(hdev, ENODEV);
1183 hci_req_lock(hdev);
1184
1185 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001186 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187 hci_req_unlock(hdev);
1188 return 0;
1189 }
1190
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001191 /* Flush RX and TX works */
1192 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001193 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001195 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001196 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001197 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001198 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001199 }
1200
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001201 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001202 cancel_delayed_work(&hdev->service_cache);
1203
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001204 cancel_delayed_work_sync(&hdev->le_scan_disable);
1205
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001206 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 inquiry_cache_flush(hdev);
1208 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001209 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210
1211 hci_notify(hdev, HCI_DEV_DOWN);
1212
1213 if (hdev->flush)
1214 hdev->flush(hdev);
1215
1216 /* Reset device */
1217 skb_queue_purge(&hdev->cmd_q);
1218 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001219 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001220 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001222 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 clear_bit(HCI_INIT, &hdev->flags);
1224 }
1225
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001226 /* flush cmd work */
1227 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228
1229 /* Drop queues */
1230 skb_queue_purge(&hdev->rx_q);
1231 skb_queue_purge(&hdev->cmd_q);
1232 skb_queue_purge(&hdev->raw_q);
1233
1234 /* Drop last sent command */
1235 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001236 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 kfree_skb(hdev->sent_cmd);
1238 hdev->sent_cmd = NULL;
1239 }
1240
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001241 kfree_skb(hdev->recv_evt);
1242 hdev->recv_evt = NULL;
1243
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244 /* After this point our queues are empty
1245 * and no tasks are scheduled. */
1246 hdev->close(hdev);
1247
Johan Hedberg35b973c2013-03-15 17:06:59 -05001248 /* Clear flags */
1249 hdev->flags = 0;
1250 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1251
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001252 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1253 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001254 hci_dev_lock(hdev);
1255 mgmt_powered(hdev, 0);
1256 hci_dev_unlock(hdev);
1257 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001258
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001259 /* Controller radio is available but is currently powered down */
1260 hdev->amp_status = 0;
1261
Johan Hedberge59fda82012-02-22 18:11:53 +02001262 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001263 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001264
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 hci_req_unlock(hdev);
1266
1267 hci_dev_put(hdev);
1268 return 0;
1269}
1270
1271int hci_dev_close(__u16 dev)
1272{
1273 struct hci_dev *hdev;
1274 int err;
1275
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001276 hdev = hci_dev_get(dev);
1277 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001279
1280 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1281 cancel_delayed_work(&hdev->power_off);
1282
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001284
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 hci_dev_put(hdev);
1286 return err;
1287}
1288
1289int hci_dev_reset(__u16 dev)
1290{
1291 struct hci_dev *hdev;
1292 int ret = 0;
1293
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001294 hdev = hci_dev_get(dev);
1295 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 return -ENODEV;
1297
1298 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299
1300 if (!test_bit(HCI_UP, &hdev->flags))
1301 goto done;
1302
1303 /* Drop queues */
1304 skb_queue_purge(&hdev->rx_q);
1305 skb_queue_purge(&hdev->cmd_q);
1306
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001307 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308 inquiry_cache_flush(hdev);
1309 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001310 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311
1312 if (hdev->flush)
1313 hdev->flush(hdev);
1314
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001315 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001316 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
1318 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001319 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320
1321done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 hci_req_unlock(hdev);
1323 hci_dev_put(hdev);
1324 return ret;
1325}
1326
1327int hci_dev_reset_stat(__u16 dev)
1328{
1329 struct hci_dev *hdev;
1330 int ret = 0;
1331
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001332 hdev = hci_dev_get(dev);
1333 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 return -ENODEV;
1335
1336 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1337
1338 hci_dev_put(hdev);
1339
1340 return ret;
1341}
1342
1343int hci_dev_cmd(unsigned int cmd, void __user *arg)
1344{
1345 struct hci_dev *hdev;
1346 struct hci_dev_req dr;
1347 int err = 0;
1348
1349 if (copy_from_user(&dr, arg, sizeof(dr)))
1350 return -EFAULT;
1351
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001352 hdev = hci_dev_get(dr.dev_id);
1353 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 return -ENODEV;
1355
1356 switch (cmd) {
1357 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001358 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1359 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 break;
1361
1362 case HCISETENCRYPT:
1363 if (!lmp_encrypt_capable(hdev)) {
1364 err = -EOPNOTSUPP;
1365 break;
1366 }
1367
1368 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1369 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001370 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1371 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 if (err)
1373 break;
1374 }
1375
Johan Hedberg01178cd2013-03-05 20:37:41 +02001376 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1377 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 break;
1379
1380 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001381 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1382 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 break;
1384
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001385 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001386 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1387 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001388 break;
1389
1390 case HCISETLINKMODE:
1391 hdev->link_mode = ((__u16) dr.dev_opt) &
1392 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1393 break;
1394
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 case HCISETPTYPE:
1396 hdev->pkt_type = (__u16) dr.dev_opt;
1397 break;
1398
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001400 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1401 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 break;
1403
1404 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001405 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1406 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 break;
1408
1409 default:
1410 err = -EINVAL;
1411 break;
1412 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001413
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 hci_dev_put(hdev);
1415 return err;
1416}
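
/* Illustrative sketch (added comment, not in the original file): the
 * HCISETACLMTU and HCISETSCOMTU cases above read dev_opt as two packed
 * 16-bit halves.  On a little-endian host a caller would therefore put
 * the MTU in the upper half and the packet count in the lower half;
 * hci_sock_fd and the values below are assumptions:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = (1021 << 16) | 8;	// ACL MTU 1021, 8 packets
 *	ioctl(hci_sock_fd, HCISETACLMTU, &dr);
 */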
1417
1418int hci_get_dev_list(void __user *arg)
1419{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001420 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 struct hci_dev_list_req *dl;
1422 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 int n = 0, size, err;
1424 __u16 dev_num;
1425
1426 if (get_user(dev_num, (__u16 __user *) arg))
1427 return -EFAULT;
1428
1429 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1430 return -EINVAL;
1431
1432 size = sizeof(*dl) + dev_num * sizeof(*dr);
1433
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001434 dl = kzalloc(size, GFP_KERNEL);
1435 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 return -ENOMEM;
1437
1438 dr = dl->dev_req;
1439
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001440 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001441 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001442 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001443 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001444
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001445 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1446 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001447
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 (dr + n)->dev_id = hdev->id;
1449 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001450
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 if (++n >= dev_num)
1452 break;
1453 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001454 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
1456 dl->dev_num = n;
1457 size = sizeof(*dl) + n * sizeof(*dr);
1458
1459 err = copy_to_user(arg, dl, size);
1460 kfree(dl);
1461
1462 return err ? -EFAULT : 0;
1463}
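
/* Illustrative userspace sketch (added comment; hci_sock_fd is an
 * assumption): the HCIGETDEVLIST ioctl that reaches this function
 * passes a variable-length buffer.  The caller sets dev_num to the
 * array capacity, and the function above rewrites it with the number
 * of entries actually filled in:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	if (ioctl(hci_sock_fd, HCIGETDEVLIST, dl) == 0)
 *		printf("%u devices\n", dl->dev_num);
 */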
1464
1465int hci_get_dev_info(void __user *arg)
1466{
1467 struct hci_dev *hdev;
1468 struct hci_dev_info di;
1469 int err = 0;
1470
1471 if (copy_from_user(&di, arg, sizeof(di)))
1472 return -EFAULT;
1473
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001474 hdev = hci_dev_get(di.dev_id);
1475 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 return -ENODEV;
1477
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001478 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001479 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001480
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001481 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1482 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001483
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 strcpy(di.name, hdev->name);
1485 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001486 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 di.flags = hdev->flags;
1488 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001489 if (lmp_bredr_capable(hdev)) {
1490 di.acl_mtu = hdev->acl_mtu;
1491 di.acl_pkts = hdev->acl_pkts;
1492 di.sco_mtu = hdev->sco_mtu;
1493 di.sco_pkts = hdev->sco_pkts;
1494 } else {
1495 di.acl_mtu = hdev->le_mtu;
1496 di.acl_pkts = hdev->le_pkts;
1497 di.sco_mtu = 0;
1498 di.sco_pkts = 0;
1499 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 di.link_policy = hdev->link_policy;
1501 di.link_mode = hdev->link_mode;
1502
1503 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1504 memcpy(&di.features, &hdev->features, sizeof(di.features));
1505
1506 if (copy_to_user(arg, &di, sizeof(di)))
1507 err = -EFAULT;
1508
1509 hci_dev_put(hdev);
1510
1511 return err;
1512}
1513
1514/* ---- Interface to HCI drivers ---- */
1515
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001516static int hci_rfkill_set_block(void *data, bool blocked)
1517{
1518 struct hci_dev *hdev = data;
1519
1520 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1521
1522 if (!blocked)
1523 return 0;
1524
1525 hci_dev_do_close(hdev);
1526
1527 return 0;
1528}
1529
1530static const struct rfkill_ops hci_rfkill_ops = {
1531 .set_block = hci_rfkill_set_block,
1532};
1533
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001534static void hci_power_on(struct work_struct *work)
1535{
1536 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1537
1538 BT_DBG("%s", hdev->name);
1539
1540 if (hci_dev_open(hdev->id) < 0)
1541 return;
1542
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001543 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg19202572013-01-14 22:33:51 +02001544 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1545 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001546
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001547 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001548 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001549}
1550
1551static void hci_power_off(struct work_struct *work)
1552{
Johan Hedberg32435532011-11-07 22:16:04 +02001553 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001554 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001555
1556 BT_DBG("%s", hdev->name);
1557
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001558 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001559}
1560
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001561static void hci_discov_off(struct work_struct *work)
1562{
1563 struct hci_dev *hdev;
1564 u8 scan = SCAN_PAGE;
1565
1566 hdev = container_of(work, struct hci_dev, discov_off.work);
1567
1568 BT_DBG("%s", hdev->name);
1569
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001570 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001571
1572 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1573
1574 hdev->discov_timeout = 0;
1575
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001576 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001577}
1578
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001579int hci_uuids_clear(struct hci_dev *hdev)
1580{
Johan Hedberg48210022013-01-27 00:31:28 +02001581 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001582
Johan Hedberg48210022013-01-27 00:31:28 +02001583 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1584 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001585 kfree(uuid);
1586 }
1587
1588 return 0;
1589}
1590
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001591int hci_link_keys_clear(struct hci_dev *hdev)
1592{
1593 struct list_head *p, *n;
1594
1595 list_for_each_safe(p, n, &hdev->link_keys) {
1596 struct link_key *key;
1597
1598 key = list_entry(p, struct link_key, list);
1599
1600 list_del(p);
1601 kfree(key);
1602 }
1603
1604 return 0;
1605}
1606
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001607int hci_smp_ltks_clear(struct hci_dev *hdev)
1608{
1609 struct smp_ltk *k, *tmp;
1610
1611 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1612 list_del(&k->list);
1613 kfree(k);
1614 }
1615
1616 return 0;
1617}
1618
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001619struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1620{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001621 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001622
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001623 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001624 if (bacmp(bdaddr, &k->bdaddr) == 0)
1625 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001626
1627 return NULL;
1628}
1629
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301630static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001631 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001632{
1633 /* Legacy key */
1634 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301635 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001636
1637 /* Debug keys are insecure so don't store them persistently */
1638 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301639 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001640
1641 /* Changed combination key and there's no previous one */
1642 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301643 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001644
1645 /* Security mode 3 case */
1646 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301647 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001648
1649 /* Neither local nor remote side had no-bonding as requirement */
1650 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301651 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001652
1653 /* Local side had dedicated bonding as requirement */
1654 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301655 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001656
1657 /* Remote side had dedicated bonding as requirement */
1658 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301659 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001660
1661 /* If none of the above criteria match, then don't store the key
1662 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301663 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001664}
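
/* Summary of the rules above (added descriptive comment): legacy keys
 * and security mode 3 keys (no connection) are always stored, debug
 * keys never are, and a changed combination key with no previous key
 * is dropped.  Otherwise a key is stored only if both sides required
 * at least general bonding, or if either side required dedicated
 * bonding (auth requirement 0x02 or 0x03).
 */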
1665
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001666struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001667{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001668 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001669
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001670 list_for_each_entry(k, &hdev->long_term_keys, list) {
1671 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001672 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001673 continue;
1674
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001675 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001676 }
1677
1678 return NULL;
1679}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001680
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001681struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001682 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001683{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001684 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001685
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001686 list_for_each_entry(k, &hdev->long_term_keys, list)
1687 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001688 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001689 return k;
1690
1691 return NULL;
1692}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001693
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001694int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001695 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001696{
1697 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301698 u8 old_key_type;
1699 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001700
1701 old_key = hci_find_link_key(hdev, bdaddr);
1702 if (old_key) {
1703 old_key_type = old_key->type;
1704 key = old_key;
1705 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001706 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001707 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1708 if (!key)
1709 return -ENOMEM;
1710 list_add(&key->list, &hdev->link_keys);
1711 }
1712
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001713 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001714
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001715 /* Some buggy controller combinations generate a changed
1716 * combination key for legacy pairing even when there's no
1717 * previous key */
1718 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001719 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001720 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001721 if (conn)
1722 conn->key_type = type;
1723 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001724
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001725 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001726 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001727 key->pin_len = pin_len;
1728
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001729 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001730 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001731 else
1732 key->type = type;
1733
Johan Hedberg4df378a2011-04-28 11:29:03 -07001734 if (!new_key)
1735 return 0;
1736
1737 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1738
Johan Hedberg744cf192011-11-08 20:40:14 +02001739 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001740
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301741 if (conn)
1742 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001743
1744 return 0;
1745}
1746
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001747int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001748 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001749 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001750{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001751 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001752
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001753 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1754 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001755
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001756 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1757 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001758 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001759 else {
1760 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001761 if (!key)
1762 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001763 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001764 }
1765
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001766 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001767 key->bdaddr_type = addr_type;
1768 memcpy(key->val, tk, sizeof(key->val));
1769 key->authenticated = authenticated;
1770 key->ediv = ediv;
1771 key->enc_size = enc_size;
1772 key->type = type;
1773 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001774
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001775 if (!new_key)
1776 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001777
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001778 if (type & HCI_SMP_LTK)
1779 mgmt_new_ltk(hdev, key, 1);
1780
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001781 return 0;
1782}
1783
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001784int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1785{
1786 struct link_key *key;
1787
1788 key = hci_find_link_key(hdev, bdaddr);
1789 if (!key)
1790 return -ENOENT;
1791
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001792 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001793
1794 list_del(&key->list);
1795 kfree(key);
1796
1797 return 0;
1798}
1799
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001800int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1801{
1802 struct smp_ltk *k, *tmp;
1803
1804 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1805 if (bacmp(bdaddr, &k->bdaddr))
1806 continue;
1807
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001808 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001809
1810 list_del(&k->list);
1811 kfree(k);
1812 }
1813
1814 return 0;
1815}
1816
Ville Tervo6bd32322011-02-16 16:32:41 +02001817/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001818static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001819{
1820 struct hci_dev *hdev = (void *) arg;
1821
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001822 if (hdev->sent_cmd) {
1823 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1824 u16 opcode = __le16_to_cpu(sent->opcode);
1825
1826 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1827 } else {
1828 BT_ERR("%s command tx timeout", hdev->name);
1829 }
1830
Ville Tervo6bd32322011-02-16 16:32:41 +02001831 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001832 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001833}
1834
Szymon Janc2763eda2011-03-22 13:12:22 +01001835struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001836 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001837{
1838 struct oob_data *data;
1839
1840 list_for_each_entry(data, &hdev->remote_oob_data, list)
1841 if (bacmp(bdaddr, &data->bdaddr) == 0)
1842 return data;
1843
1844 return NULL;
1845}
1846
1847int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1848{
1849 struct oob_data *data;
1850
1851 data = hci_find_remote_oob_data(hdev, bdaddr);
1852 if (!data)
1853 return -ENOENT;
1854
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001855 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001856
1857 list_del(&data->list);
1858 kfree(data);
1859
1860 return 0;
1861}
1862
1863int hci_remote_oob_data_clear(struct hci_dev *hdev)
1864{
1865 struct oob_data *data, *n;
1866
1867 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1868 list_del(&data->list);
1869 kfree(data);
1870 }
1871
1872 return 0;
1873}
1874
1875int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001876 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001877{
1878 struct oob_data *data;
1879
1880 data = hci_find_remote_oob_data(hdev, bdaddr);
1881
1882 if (!data) {
1883 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1884 if (!data)
1885 return -ENOMEM;
1886
1887 bacpy(&data->bdaddr, bdaddr);
1888 list_add(&data->list, &hdev->remote_oob_data);
1889 }
1890
1891 memcpy(data->hash, hash, sizeof(data->hash));
1892 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1893
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001894 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001895
1896 return 0;
1897}
1898
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001899struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001900{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001901 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001902
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001903 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001904 if (bacmp(bdaddr, &b->bdaddr) == 0)
1905 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001906
1907 return NULL;
1908}
1909
1910int hci_blacklist_clear(struct hci_dev *hdev)
1911{
1912 struct list_head *p, *n;
1913
1914 list_for_each_safe(p, n, &hdev->blacklist) {
1915 struct bdaddr_list *b;
1916
1917 b = list_entry(p, struct bdaddr_list, list);
1918
1919 list_del(p);
1920 kfree(b);
1921 }
1922
1923 return 0;
1924}
1925
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001926int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001927{
1928 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001929
1930 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1931 return -EBADF;
1932
Antti Julku5e762442011-08-25 16:48:02 +03001933 if (hci_blacklist_lookup(hdev, bdaddr))
1934 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001935
1936 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001937 if (!entry)
1938 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001939
1940 bacpy(&entry->bdaddr, bdaddr);
1941
1942 list_add(&entry->list, &hdev->blacklist);
1943
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001944 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001945}
1946
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001947int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001948{
1949 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001950
Szymon Janc1ec918c2011-11-16 09:32:21 +01001951 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001952 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001953
1954 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001955 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001956 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001957
1958 list_del(&entry->list);
1959 kfree(entry);
1960
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001961 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001962}
1963
Johan Hedberg42c6b122013-03-05 20:37:49 +02001964static void le_scan_param_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001965{
1966 struct le_scan_params *param = (struct le_scan_params *) opt;
1967 struct hci_cp_le_set_scan_param cp;
1968
1969 memset(&cp, 0, sizeof(cp));
1970 cp.type = param->type;
1971 cp.interval = cpu_to_le16(param->interval);
1972 cp.window = cpu_to_le16(param->window);
1973
Johan Hedberg42c6b122013-03-05 20:37:49 +02001974 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001975}
1976
Johan Hedberg42c6b122013-03-05 20:37:49 +02001977static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001978{
1979 struct hci_cp_le_set_scan_enable cp;
1980
1981 memset(&cp, 0, sizeof(cp));
1982 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001983 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001984
Johan Hedberg42c6b122013-03-05 20:37:49 +02001985 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001986}
1987
1988static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001989 u16 window, int timeout)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001990{
1991 long timeo = msecs_to_jiffies(3000);
1992 struct le_scan_params param;
1993 int err;
1994
1995 BT_DBG("%s", hdev->name);
1996
1997 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1998 return -EINPROGRESS;
1999
2000 param.type = type;
2001 param.interval = interval;
2002 param.window = window;
2003
2004 hci_req_lock(hdev);
2005
Johan Hedberg01178cd2013-03-05 20:37:41 +02002006 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2007 timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002008 if (!err)
Johan Hedberg01178cd2013-03-05 20:37:41 +02002009 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002010
2011 hci_req_unlock(hdev);
2012
2013 if (err < 0)
2014 return err;
2015
Johan Hedberg46818ed2013-01-14 22:33:52 +02002016 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2017 msecs_to_jiffies(timeout));
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002018
2019 return 0;
2020}
2021
Andre Guedes7dbfac12012-03-15 16:52:07 -03002022int hci_cancel_le_scan(struct hci_dev *hdev)
2023{
2024 BT_DBG("%s", hdev->name);
2025
2026 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2027 return -EALREADY;
2028
2029 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2030 struct hci_cp_le_set_scan_enable cp;
2031
2032 /* Send HCI command to disable LE Scan */
2033 memset(&cp, 0, sizeof(cp));
2034 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2035 }
2036
2037 return 0;
2038}
2039
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002040static void le_scan_disable_work(struct work_struct *work)
2041{
2042 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002043 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002044 struct hci_cp_le_set_scan_enable cp;
2045
2046 BT_DBG("%s", hdev->name);
2047
2048 memset(&cp, 0, sizeof(cp));
2049
2050 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2051}
2052
Andre Guedes28b75a82012-02-03 17:48:00 -03002053static void le_scan_work(struct work_struct *work)
2054{
2055 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2056 struct le_scan_params *param = &hdev->le_scan_params;
2057
2058 BT_DBG("%s", hdev->name);
2059
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002060 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2061 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03002062}
2063
2064int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002065 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03002066{
2067 struct le_scan_params *param = &hdev->le_scan_params;
2068
2069 BT_DBG("%s", hdev->name);
2070
Johan Hedbergf15504782012-10-24 21:12:03 +03002071 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2072 return -ENOTSUPP;
2073
Andre Guedes28b75a82012-02-03 17:48:00 -03002074 if (work_busy(&hdev->le_scan))
2075 return -EINPROGRESS;
2076
2077 param->type = type;
2078 param->interval = interval;
2079 param->window = window;
2080 param->timeout = timeout;
2081
2082 queue_work(system_long_wq, &hdev->le_scan);
2083
2084 return 0;
2085}
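
/* Illustrative call (added comment; the parameter values are
 * assumptions, with interval and window in the standard LE units of
 * 0.625 ms and the timeout in milliseconds, as consumed by
 * msecs_to_jiffies() in hci_do_le_scan() above):
 *
 *	err = hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10000);
 */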
2086
David Herrmann9be0dab2012-04-22 14:39:57 +02002087/* Alloc HCI device */
2088struct hci_dev *hci_alloc_dev(void)
2089{
2090 struct hci_dev *hdev;
2091
2092 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2093 if (!hdev)
2094 return NULL;
2095
David Herrmannb1b813d2012-04-22 14:39:58 +02002096 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2097 hdev->esco_type = (ESCO_HV1);
2098 hdev->link_mode = (HCI_LM_ACCEPT);
2099 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002100 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2101 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002102
David Herrmannb1b813d2012-04-22 14:39:58 +02002103 hdev->sniff_max_interval = 800;
2104 hdev->sniff_min_interval = 80;
2105
2106 mutex_init(&hdev->lock);
2107 mutex_init(&hdev->req_lock);
2108
2109 INIT_LIST_HEAD(&hdev->mgmt_pending);
2110 INIT_LIST_HEAD(&hdev->blacklist);
2111 INIT_LIST_HEAD(&hdev->uuids);
2112 INIT_LIST_HEAD(&hdev->link_keys);
2113 INIT_LIST_HEAD(&hdev->long_term_keys);
2114 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002115 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002116
2117 INIT_WORK(&hdev->rx_work, hci_rx_work);
2118 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2119 INIT_WORK(&hdev->tx_work, hci_tx_work);
2120 INIT_WORK(&hdev->power_on, hci_power_on);
2121 INIT_WORK(&hdev->le_scan, le_scan_work);
2122
David Herrmannb1b813d2012-04-22 14:39:58 +02002123 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2124 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2125 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2126
David Herrmannb1b813d2012-04-22 14:39:58 +02002127 skb_queue_head_init(&hdev->rx_q);
2128 skb_queue_head_init(&hdev->cmd_q);
2129 skb_queue_head_init(&hdev->raw_q);
2130
2131 init_waitqueue_head(&hdev->req_wait_q);
2132
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002133 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002134
David Herrmannb1b813d2012-04-22 14:39:58 +02002135 hci_init_sysfs(hdev);
2136 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002137
2138 return hdev;
2139}
2140EXPORT_SYMBOL(hci_alloc_dev);
2141
2142/* Free HCI device */
2143void hci_free_dev(struct hci_dev *hdev)
2144{
David Herrmann9be0dab2012-04-22 14:39:57 +02002145 /* will free via device release */
2146 put_device(&hdev->dev);
2147}
2148EXPORT_SYMBOL(hci_free_dev);
2149
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150/* Register HCI device */
2151int hci_register_dev(struct hci_dev *hdev)
2152{
David Herrmannb1b813d2012-04-22 14:39:58 +02002153 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154
David Herrmann010666a2012-01-07 15:47:07 +01002155 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 return -EINVAL;
2157
Mat Martineau08add512011-11-02 16:18:36 -07002158 /* Do not allow HCI_AMP devices to register at index 0,
2159 * so the index can be used as the AMP controller ID.
2160 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002161 switch (hdev->dev_type) {
2162 case HCI_BREDR:
2163 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2164 break;
2165 case HCI_AMP:
2166 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2167 break;
2168 default:
2169 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002171
Sasha Levin3df92b32012-05-27 22:36:56 +02002172 if (id < 0)
2173 return id;
2174
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 sprintf(hdev->name, "hci%d", id);
2176 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002177
2178 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2179
Sasha Levin3df92b32012-05-27 22:36:56 +02002180 write_lock(&hci_dev_list_lock);
2181 list_add(&hdev->list, &hci_dev_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002182 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02002184 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002185 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02002186 if (!hdev->workqueue) {
2187 error = -ENOMEM;
2188 goto err;
2189 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002190
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002191 hdev->req_workqueue = alloc_workqueue(hdev->name,
2192 WQ_HIGHPRI | WQ_UNBOUND |
2193 WQ_MEM_RECLAIM, 1);
2194 if (!hdev->req_workqueue) {
2195 destroy_workqueue(hdev->workqueue);
2196 error = -ENOMEM;
2197 goto err;
2198 }
2199
David Herrmann33ca9542011-10-08 14:58:49 +02002200 error = hci_add_sysfs(hdev);
2201 if (error < 0)
2202 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002204 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002205 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2206 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002207 if (hdev->rfkill) {
2208 if (rfkill_register(hdev->rfkill) < 0) {
2209 rfkill_destroy(hdev->rfkill);
2210 hdev->rfkill = NULL;
2211 }
2212 }
2213
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002214 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002215
2216 if (hdev->dev_type != HCI_AMP)
2217 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2218
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002220 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221
Johan Hedberg19202572013-01-14 22:33:51 +02002222 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002223
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002225
David Herrmann33ca9542011-10-08 14:58:49 +02002226err_wqueue:
2227 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002228 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002229err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002230 ida_simple_remove(&hci_index_ida, hdev->id);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002231 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002232 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002233 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002234
David Herrmann33ca9542011-10-08 14:58:49 +02002235 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236}
2237EXPORT_SYMBOL(hci_register_dev);
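
/* Minimal driver-side sketch (added comment; my_open, my_close and
 * my_send are hypothetical driver callbacks).  open and close are
 * mandatory, as checked at the top of hci_register_dev():
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */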
2238
2239/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002240void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241{
Sasha Levin3df92b32012-05-27 22:36:56 +02002242 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002243
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002244 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245
Johan Hovold94324962012-03-15 14:48:41 +01002246 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2247
Sasha Levin3df92b32012-05-27 22:36:56 +02002248 id = hdev->id;
2249
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002250 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002252 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253
2254 hci_dev_do_close(hdev);
2255
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302256 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002257 kfree_skb(hdev->reassembly[i]);
2258
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002259 cancel_work_sync(&hdev->power_on);
2260
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002261 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002262 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002263 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002264 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002265 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002266 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002267
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002268 /* mgmt_index_removed should take care of emptying the
2269 * pending list */
2270 BUG_ON(!list_empty(&hdev->mgmt_pending));
2271
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 hci_notify(hdev, HCI_DEV_UNREG);
2273
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002274 if (hdev->rfkill) {
2275 rfkill_unregister(hdev->rfkill);
2276 rfkill_destroy(hdev->rfkill);
2277 }
2278
David Herrmannce242972011-10-08 14:58:48 +02002279 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002280
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002281 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002282 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002283
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002284 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002285 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002286 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002287 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002288 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002289 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002290 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002291
David Herrmanndc946bd2012-01-07 15:47:24 +01002292 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002293
2294 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295}
2296EXPORT_SYMBOL(hci_unregister_dev);
2297
2298/* Suspend HCI device */
2299int hci_suspend_dev(struct hci_dev *hdev)
2300{
2301 hci_notify(hdev, HCI_DEV_SUSPEND);
2302 return 0;
2303}
2304EXPORT_SYMBOL(hci_suspend_dev);
2305
2306/* Resume HCI device */
2307int hci_resume_dev(struct hci_dev *hdev)
2308{
2309 hci_notify(hdev, HCI_DEV_RESUME);
2310 return 0;
2311}
2312EXPORT_SYMBOL(hci_resume_dev);
2313
Marcel Holtmann76bca882009-11-18 00:40:39 +01002314/* Receive frame from HCI drivers */
2315int hci_recv_frame(struct sk_buff *skb)
2316{
2317 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2318 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002319 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002320 kfree_skb(skb);
2321 return -ENXIO;
2322 }
2323
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002324 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002325 bt_cb(skb)->incoming = 1;
2326
2327 /* Time stamp */
2328 __net_timestamp(skb);
2329
Marcel Holtmann76bca882009-11-18 00:40:39 +01002330 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002331 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002332
Marcel Holtmann76bca882009-11-18 00:40:39 +01002333 return 0;
2334}
2335EXPORT_SYMBOL(hci_recv_frame);
2336
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302337static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002338 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302339{
2340 int len = 0;
2341 int hlen = 0;
2342 int remain = count;
2343 struct sk_buff *skb;
2344 struct bt_skb_cb *scb;
2345
2346 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002347 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302348 return -EILSEQ;
2349
2350 skb = hdev->reassembly[index];
2351
2352 if (!skb) {
2353 switch (type) {
2354 case HCI_ACLDATA_PKT:
2355 len = HCI_MAX_FRAME_SIZE;
2356 hlen = HCI_ACL_HDR_SIZE;
2357 break;
2358 case HCI_EVENT_PKT:
2359 len = HCI_MAX_EVENT_SIZE;
2360 hlen = HCI_EVENT_HDR_SIZE;
2361 break;
2362 case HCI_SCODATA_PKT:
2363 len = HCI_MAX_SCO_SIZE;
2364 hlen = HCI_SCO_HDR_SIZE;
2365 break;
2366 }
2367
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002368 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302369 if (!skb)
2370 return -ENOMEM;
2371
2372 scb = (void *) skb->cb;
2373 scb->expect = hlen;
2374 scb->pkt_type = type;
2375
2376 skb->dev = (void *) hdev;
2377 hdev->reassembly[index] = skb;
2378 }
2379
2380 while (count) {
2381 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002382 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302383
2384 memcpy(skb_put(skb, len), data, len);
2385
2386 count -= len;
2387 data += len;
2388 scb->expect -= len;
2389 remain = count;
2390
2391 switch (type) {
2392 case HCI_EVENT_PKT:
2393 if (skb->len == HCI_EVENT_HDR_SIZE) {
2394 struct hci_event_hdr *h = hci_event_hdr(skb);
2395 scb->expect = h->plen;
2396
2397 if (skb_tailroom(skb) < scb->expect) {
2398 kfree_skb(skb);
2399 hdev->reassembly[index] = NULL;
2400 return -ENOMEM;
2401 }
2402 }
2403 break;
2404
2405 case HCI_ACLDATA_PKT:
2406 if (skb->len == HCI_ACL_HDR_SIZE) {
2407 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2408 scb->expect = __le16_to_cpu(h->dlen);
2409
2410 if (skb_tailroom(skb) < scb->expect) {
2411 kfree_skb(skb);
2412 hdev->reassembly[index] = NULL;
2413 return -ENOMEM;
2414 }
2415 }
2416 break;
2417
2418 case HCI_SCODATA_PKT:
2419 if (skb->len == HCI_SCO_HDR_SIZE) {
2420 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2421 scb->expect = h->dlen;
2422
2423 if (skb_tailroom(skb) < scb->expect) {
2424 kfree_skb(skb);
2425 hdev->reassembly[index] = NULL;
2426 return -ENOMEM;
2427 }
2428 }
2429 break;
2430 }
2431
2432 if (scb->expect == 0) {
2433 /* Complete frame */
2434
2435 bt_cb(skb)->pkt_type = type;
2436 hci_recv_frame(skb);
2437
2438 hdev->reassembly[index] = NULL;
2439 return remain;
2440 }
2441 }
2442
2443 return remain;
2444}
2445
Marcel Holtmannef222012007-07-11 06:42:04 +02002446int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2447{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302448 int rem = 0;
2449
Marcel Holtmannef222012007-07-11 06:42:04 +02002450 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2451 return -EILSEQ;
2452
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002453 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002454 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302455 if (rem < 0)
2456 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002457
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302458 data += (count - rem);
2459 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002460 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002461
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302462 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002463}
2464EXPORT_SYMBOL(hci_recv_fragment);
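
/* Illustrative driver usage (added comment; buf and len are
 * assumptions): a byte-stream driver that already knows the packet
 * type can feed arbitrary chunks and let hci_reassembly() above
 * accumulate complete frames:
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("%s reassembly failed", hdev->name);
 */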
2465
Suraj Sumangala99811512010-07-14 13:02:19 +05302466#define STREAM_REASSEMBLY 0
2467
2468int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2469{
2470 int type;
2471 int rem = 0;
2472
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002473 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302474 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2475
2476 if (!skb) {
2477 struct { char type; } *pkt;
2478
2479 /* Start of the frame */
2480 pkt = data;
2481 type = pkt->type;
2482
2483 data++;
2484 count--;
2485 } else
2486 type = bt_cb(skb)->pkt_type;
2487
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002488 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002489 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302490 if (rem < 0)
2491 return rem;
2492
2493 data += (count - rem);
2494 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002495 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302496
2497 return rem;
2498}
2499EXPORT_SYMBOL(hci_recv_stream_fragment);
2500
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501/* ---- Interface to upper protocols ---- */
2502
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503int hci_register_cb(struct hci_cb *cb)
2504{
2505 BT_DBG("%p name %s", cb, cb->name);
2506
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002507 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002509 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510
2511 return 0;
2512}
2513EXPORT_SYMBOL(hci_register_cb);
2514
2515int hci_unregister_cb(struct hci_cb *cb)
2516{
2517 BT_DBG("%p name %s", cb, cb->name);
2518
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002519 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002521 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522
2523 return 0;
2524}
2525EXPORT_SYMBOL(hci_unregister_cb);
2526
2527static int hci_send_frame(struct sk_buff *skb)
2528{
2529 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2530
2531 if (!hdev) {
2532 kfree_skb(skb);
2533 return -ENODEV;
2534 }
2535
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002536 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002538 /* Time stamp */
2539 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002541 /* Send copy to monitor */
2542 hci_send_to_monitor(hdev, skb);
2543
2544 if (atomic_read(&hdev->promisc)) {
2545 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002546 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 }
2548
2549 /* Get rid of skb owner, prior to sending to the driver. */
2550 skb_orphan(skb);
2551
2552 return hdev->send(skb);
2553}
2554
Johan Hedberg3119ae92013-03-05 20:37:44 +02002555void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2556{
2557 skb_queue_head_init(&req->cmd_q);
2558 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002559 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002560}
2561
2562int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2563{
2564 struct hci_dev *hdev = req->hdev;
2565 struct sk_buff *skb;
2566 unsigned long flags;
2567
2568 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2569
Andre Guedes5d73e032013-03-08 11:20:16 -03002570	/* If an error occurred during request building, remove all HCI
2571 * commands queued on the HCI request queue.
2572 */
2573 if (req->err) {
2574 skb_queue_purge(&req->cmd_q);
2575 return req->err;
2576 }
2577
Johan Hedberg3119ae92013-03-05 20:37:44 +02002578 /* Do not allow empty requests */
2579 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002580 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002581
2582 skb = skb_peek_tail(&req->cmd_q);
2583 bt_cb(skb)->req.complete = complete;
2584
2585 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2586 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2587 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2588
2589 queue_work(hdev->workqueue, &hdev->cmd_work);
2590
2591 return 0;
2592}
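
/* Illustrative use of the request API (added comment; my_complete is
 * a hypothetical hci_req_complete_t callback).  Commands queued on one
 * request are sent back to back, and the completion set above on the
 * tail skb runs once for the whole batch:
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *	err = hci_req_run(&req, my_complete);
 */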
2593
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002594static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2595 u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596{
2597 int len = HCI_COMMAND_HDR_SIZE + plen;
2598 struct hci_command_hdr *hdr;
2599 struct sk_buff *skb;
2600
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002602 if (!skb)
2603 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604
2605 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002606 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 hdr->plen = plen;
2608
2609 if (plen)
2610 memcpy(skb_put(skb, plen), param, plen);
2611
2612 BT_DBG("skb len %d", skb->len);
2613
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002614 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002616
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002617 return skb;
2618}
2619
2620/* Send HCI command */
2621int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2622{
2623 struct sk_buff *skb;
2624
2625 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2626
2627 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2628 if (!skb) {
2629 BT_ERR("%s no memory for command", hdev->name);
2630 return -ENOMEM;
2631 }
2632
Johan Hedberg11714b32013-03-05 20:37:47 +02002633	/* Stand-alone HCI commands must be flagged as
2634 * single-command requests.
2635 */
2636 bt_cb(skb)->req.start = true;
2637
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002639 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640
2641 return 0;
2642}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643
Johan Hedberg71c76a12013-03-05 20:37:46 +02002644/* Queue a command to an asynchronous HCI request */
Johan Hedberg02350a72013-04-03 21:50:29 +03002645void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
2646 u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002647{
2648 struct hci_dev *hdev = req->hdev;
2649 struct sk_buff *skb;
2650
2651 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2652
Andre Guedes34739c12013-03-08 11:20:18 -03002653	/* If an error occurred during request building, there is no point in
2654 * queueing the HCI command. We can simply return.
2655 */
2656 if (req->err)
2657 return;
2658
Johan Hedberg71c76a12013-03-05 20:37:46 +02002659 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2660 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002661 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2662 hdev->name, opcode);
2663 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002664 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002665 }
2666
2667 if (skb_queue_empty(&req->cmd_q))
2668 bt_cb(skb)->req.start = true;
2669
Johan Hedberg02350a72013-04-03 21:50:29 +03002670 bt_cb(skb)->req.event = event;
2671
Johan Hedberg71c76a12013-03-05 20:37:46 +02002672 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002673}
2674
Johan Hedberg02350a72013-04-03 21:50:29 +03002675void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2676{
2677 hci_req_add_ev(req, opcode, plen, param, 0);
2678}
2679
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002681void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682{
2683 struct hci_command_hdr *hdr;
2684
2685 if (!hdev->sent_cmd)
2686 return NULL;
2687
2688 hdr = (void *) hdev->sent_cmd->data;
2689
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002690 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 return NULL;
2692
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002693 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694
2695 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2696}
2697
2698/* Send ACL data */
2699static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2700{
2701 struct hci_acl_hdr *hdr;
2702 int len = skb->len;
2703
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002704 skb_push(skb, HCI_ACL_HDR_SIZE);
2705 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002706 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002707 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2708 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709}
2710
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002711static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002712 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002714 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 struct hci_dev *hdev = conn->hdev;
2716 struct sk_buff *list;
2717
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002718 skb->len = skb_headlen(skb);
2719 skb->data_len = 0;
2720
2721 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002722
2723 switch (hdev->dev_type) {
2724 case HCI_BREDR:
2725 hci_add_acl_hdr(skb, conn->handle, flags);
2726 break;
2727 case HCI_AMP:
2728 hci_add_acl_hdr(skb, chan->handle, flags);
2729 break;
2730 default:
2731 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2732 return;
2733 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002734
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002735 list = skb_shinfo(skb)->frag_list;
2736 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737		/* Non-fragmented */
2738 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2739
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002740 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 } else {
2742 /* Fragmented */
2743 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2744
2745 skb_shinfo(skb)->frag_list = NULL;
2746
2747 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002748 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002750 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002751
2752 flags &= ~ACL_START;
2753 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754 do {
2755 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002756
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002758 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002759 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760
2761 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2762
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002763 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 } while (list);
2765
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002766 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002768}
2769
2770void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2771{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002772 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002773
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002774 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002775
2776 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002777
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002778 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002780 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
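/* Pick the connection of the given link type with the fewest packets
 * in flight and compute its quote: the controller's free buffer count
 * divided by the number of connections with queued data (minimum 1).
 */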
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

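/* Link TX timeout: the controller stopped acknowledging packets, so
 * disconnect every connection of this type that still has unacked
 * data outstanding.
 */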
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

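/* Channel scheduler: like hci_low_sent(), but walks the HCI channels
 * of every ready connection, preferring the channel whose head skb has
 * the highest priority and, among equals, the connection with the
 * fewest packets in flight.
 */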
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

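/* Starvation avoidance: after a scheduling round, any channel that got
 * nothing sent has the skb at the head of its queue promoted to
 * HCI_PRIO_MAX - 1 so that low-priority traffic still makes progress.
 */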
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

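/* With block-based flow control the payload is accounted in
 * controller-sized blocks: e.g. a 339-byte ACL frame (4 bytes of
 * header plus 335 bytes of payload) with a block_len of 64 occupies
 * DIV_ROUND_UP(335, 64) = 6 blocks.
 */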
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

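/* Trigger the link TX timeout check once the controller has no free
 * ACL buffers left and nothing has completed for HCI_ACL_TX_TIMEOUT.
 */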
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

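/* ACL scheduling with packet-based flow control: drain the selected
 * channel up to its quote, stopping early once the next queued skb has
 * a lower priority than the one this round started with.
 */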
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

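/* ACL scheduling with block-based flow control: same policy as
 * hci_sched_acl_pkt(), but both the quote and the controller budget
 * are charged in buffer blocks (see __get_blocks()) instead of whole
 * packets.
 */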
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

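/* Dispatch to packet-based or block-based ACL scheduling depending on
 * the controller's flow control mode.
 */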
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

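/* Schedule eSCO: same as SCO scheduling, for ESCO_LINK connections
 * (both draw from the controller's shared sco_cnt budget).
 */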
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

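/* Schedule LE data: uses the dedicated LE buffer pool when the
 * controller advertises one (le_pkts), otherwise borrows from the ACL
 * buffer budget.
 */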
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

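/* TX work: run every per-link-type scheduler, then flush any queued
 * raw (unknown type) packets straight to the driver.
 */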
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

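/* A request is complete once the command queue is empty or the next
 * queued command is flagged as the start of a new request.
 */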
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

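/* Requeue a clone of the last sent command (anything but HCI_Reset) at
 * the head of the command queue and reschedule the command worker.
 */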
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

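/* Called from event processing when a command completes: work out
 * whether the request the command belongs to is now finished and, if
 * so, flush the request's remaining queued commands and invoke its
 * completion callback.
 */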
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

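/* RX work: drain the receive queue, mirroring each frame to the
 * monitor and to any promiscuous sockets before dispatching it to the
 * event, ACL or SCO handlers.
 */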
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

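/* Command work: if the controller has a command credit, send the next
 * queued command, keep a clone in sent_cmd for the response handlers,
 * and (re)arm the command timeout unless a reset is in flight.
 */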
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

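/* Start a general inquiry (GIAC) of the given length unless one is
 * already in progress; the inquiry cache is flushed first so stale
 * results do not linger.
 */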
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

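/* Cancel a running inquiry; returns -EALREADY if none is active. */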
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

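/* Map an exported bdaddr type (BDADDR_LE_*) to the internal LE address
 * type, falling back to the random type for anything unrecognised.
 */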
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}