/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

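/* A synchronous request is built by the func() callback passed in,
 * submitted with hci_req_run() and then slept on: the
 * hci_req_sync_complete() callback wakes the sleeper once the last
 * command of the request has completed (or it is canceled/times out).
 */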
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;
		remove_wait_queue(&hdev->req_wait_q, &wait);
		/* req_run will fail if the request did not add any
		 * commands to the queue, something that can happen when
		 * a request with conditionals doesn't trigger any
		 * commands to be sent. This is normal behavior and
		 * should not trigger an error return.
		 */
		return 0;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

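/* Wrapper around __hci_req_sync() for callers that do not already hold
 * hci_req_lock; it also fails fast with -ENETDOWN when the device is
 * not up.
 */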
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

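/* Stage one of the controller bring-up: send any commands the driver
 * queued on hdev->driver_init, optionally reset the controller and
 * then run the transport-specific (BR/EDR vs AMP) basic init request.
 */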
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

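/* Pick the richest inquiry result format the controller can deliver:
 * 0x02 = with EIR, 0x01 = with RSSI, 0x00 = standard. The explicit
 * manufacturer/revision checks appear to cover older controllers that
 * deliver RSSI results without advertising the LMP feature bit.
 */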
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

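/* Stage two of the bring-up: common BR/EDR and LE configuration,
 * driven by the feature bits read during stage one.
 */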
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

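/* Sync the Write LE Host Supported setting with the HCI_LE_ENABLED
 * flag; the command is only issued when the host value would actually
 * change.
 */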
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

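/* Advance the discovery state machine; mgmt is only notified when
 * discovery effectively starts (DISCOVERY_FINDING) or stops
 * (DISCOVERY_STOPPED).
 */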
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

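/* Re-insert ie into the resolve list so the list stays ordered by
 * ascending abs(RSSI), i.e. strongest signal first: name resolution
 * then proceeds from the closest devices outwards.
 */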
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

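/* Add a fresh inquiry result to the cache, or refresh an existing
 * entry. Returns true when the remote name is already known, meaning
 * the caller can skip the remote name request.
 */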
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

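/* HCIINQUIRY ioctl: optionally run a fresh inquiry (when the cache is
 * stale or empty, or a flush was requested) and then copy the cached
 * results to user space.
 */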
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

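/* Build the LE advertising data payload (flags, TX power and local
 * name, in that order) into ptr and return its length; the name is
 * shortened when the remaining space cannot hold it in full.
 */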
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

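/* Common teardown used by hci_dev_close(), rfkill blocking and the
 * delayed power-off work: flush pending work, drain all queues,
 * optionally reset the controller and let mgmt know the device is
 * powered down.
 */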
1060static int hci_dev_do_close(struct hci_dev *hdev)
1061{
1062 BT_DBG("%s %p", hdev->name, hdev);
1063
Andre Guedes28b75a82012-02-03 17:48:00 -03001064 cancel_work_sync(&hdev->le_scan);
1065
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001066 cancel_delayed_work(&hdev->power_off);
1067
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068 hci_req_cancel(hdev, ENODEV);
1069 hci_req_lock(hdev);
1070
1071 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001072 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073 hci_req_unlock(hdev);
1074 return 0;
1075 }
1076
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001077 /* Flush RX and TX works */
1078 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001079 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001081 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001082 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001083 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001084 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001085 }
1086
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001087 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001088 cancel_delayed_work(&hdev->service_cache);
1089
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001090 cancel_delayed_work_sync(&hdev->le_scan_disable);
1091
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001092 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 inquiry_cache_flush(hdev);
1094 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001095 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096
1097 hci_notify(hdev, HCI_DEV_DOWN);
1098
1099 if (hdev->flush)
1100 hdev->flush(hdev);
1101
1102 /* Reset device */
1103 skb_queue_purge(&hdev->cmd_q);
1104 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001105 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001106 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001108 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109 clear_bit(HCI_INIT, &hdev->flags);
1110 }
1111
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001112 /* flush cmd work */
1113 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114
1115 /* Drop queues */
1116 skb_queue_purge(&hdev->rx_q);
1117 skb_queue_purge(&hdev->cmd_q);
1118 skb_queue_purge(&hdev->raw_q);
1119
1120 /* Drop last sent command */
1121 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001122 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123 kfree_skb(hdev->sent_cmd);
1124 hdev->sent_cmd = NULL;
1125 }
1126
1127 /* After this point our queues are empty
1128 * and no tasks are scheduled. */
1129 hdev->close(hdev);
1130
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001131 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1132 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001133 hci_dev_lock(hdev);
1134 mgmt_powered(hdev, 0);
1135 hci_dev_unlock(hdev);
1136 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001137
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138 /* Clear flags */
1139 hdev->flags = 0;
1140
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001141 /* Controller radio is available but is currently powered down */
1142 hdev->amp_status = 0;
1143
Johan Hedberge59fda82012-02-22 18:11:53 +02001144 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001145 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001146
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147 hci_req_unlock(hdev);
1148
1149 hci_dev_put(hdev);
1150 return 0;
1151}
1152
1153int hci_dev_close(__u16 dev)
1154{
1155 struct hci_dev *hdev;
1156 int err;
1157
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001158 hdev = hci_dev_get(dev);
1159 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001161
1162 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1163 cancel_delayed_work(&hdev->power_off);
1164
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001166
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167 hci_dev_put(hdev);
1168 return err;
1169}
1170
1171int hci_dev_reset(__u16 dev)
1172{
1173 struct hci_dev *hdev;
1174 int ret = 0;
1175
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001176 hdev = hci_dev_get(dev);
1177 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 return -ENODEV;
1179
1180 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181
1182 if (!test_bit(HCI_UP, &hdev->flags))
1183 goto done;
1184
1185 /* Drop queues */
1186 skb_queue_purge(&hdev->rx_q);
1187 skb_queue_purge(&hdev->cmd_q);
1188
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001189 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 inquiry_cache_flush(hdev);
1191 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001192 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193
1194 if (hdev->flush)
1195 hdev->flush(hdev);
1196
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001197 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001198 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199
1200 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001201 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202
1203done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 hci_req_unlock(hdev);
1205 hci_dev_put(hdev);
1206 return ret;
1207}
1208
1209int hci_dev_reset_stat(__u16 dev)
1210{
1211 struct hci_dev *hdev;
1212 int ret = 0;
1213
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001214 hdev = hci_dev_get(dev);
1215 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 return -ENODEV;
1217
1218 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1219
1220 hci_dev_put(hdev);
1221
1222 return ret;
1223}
1224
1225int hci_dev_cmd(unsigned int cmd, void __user *arg)
1226{
1227 struct hci_dev *hdev;
1228 struct hci_dev_req dr;
1229 int err = 0;
1230
1231 if (copy_from_user(&dr, arg, sizeof(dr)))
1232 return -EFAULT;
1233
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001234 hdev = hci_dev_get(dr.dev_id);
1235 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 return -ENODEV;
1237
1238 switch (cmd) {
1239 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001240 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1241 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 break;
1243
1244 case HCISETENCRYPT:
1245 if (!lmp_encrypt_capable(hdev)) {
1246 err = -EOPNOTSUPP;
1247 break;
1248 }
1249
1250 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1251 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001252 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1253 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 if (err)
1255 break;
1256 }
1257
Johan Hedberg01178cd2013-03-05 20:37:41 +02001258 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1259 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 break;
1261
1262 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001263 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1264 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 break;
1266
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001267 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001268 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1269 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001270 break;
1271
1272 case HCISETLINKMODE:
1273 hdev->link_mode = ((__u16) dr.dev_opt) &
1274 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1275 break;
1276
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277 case HCISETPTYPE:
1278 hdev->pkt_type = (__u16) dr.dev_opt;
1279 break;
1280
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001282 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1283 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 break;
1285
1286 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001287 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1288 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 break;
1290
1291 default:
1292 err = -EINVAL;
1293 break;
1294 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001295
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 hci_dev_put(hdev);
1297 return err;
1298}
1299
1300int hci_get_dev_list(void __user *arg)
1301{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001302 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 struct hci_dev_list_req *dl;
1304 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 int n = 0, size, err;
1306 __u16 dev_num;
1307
1308 if (get_user(dev_num, (__u16 __user *) arg))
1309 return -EFAULT;
1310
1311 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1312 return -EINVAL;
1313
1314 size = sizeof(*dl) + dev_num * sizeof(*dr);
1315
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001316 dl = kzalloc(size, GFP_KERNEL);
1317 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 return -ENOMEM;
1319
1320 dr = dl->dev_req;
1321
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001322 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001323 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001324 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001325 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001326
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001327 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1328 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001329
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 (dr + n)->dev_id = hdev->id;
1331 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001332
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 if (++n >= dev_num)
1334 break;
1335 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001336 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
1338 dl->dev_num = n;
1339 size = sizeof(*dl) + n * sizeof(*dr);
1340
1341 err = copy_to_user(arg, dl, size);
1342 kfree(dl);
1343
1344 return err ? -EFAULT : 0;
1345}
1346
1347int hci_get_dev_info(void __user *arg)
1348{
1349 struct hci_dev *hdev;
1350 struct hci_dev_info di;
1351 int err = 0;
1352
1353 if (copy_from_user(&di, arg, sizeof(di)))
1354 return -EFAULT;
1355
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001356 hdev = hci_dev_get(di.dev_id);
1357 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 return -ENODEV;
1359
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001360 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001361 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001362
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001363 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1364 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001365
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 strcpy(di.name, hdev->name);
1367 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001368 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 di.flags = hdev->flags;
1370 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001371 if (lmp_bredr_capable(hdev)) {
1372 di.acl_mtu = hdev->acl_mtu;
1373 di.acl_pkts = hdev->acl_pkts;
1374 di.sco_mtu = hdev->sco_mtu;
1375 di.sco_pkts = hdev->sco_pkts;
1376 } else {
1377 di.acl_mtu = hdev->le_mtu;
1378 di.acl_pkts = hdev->le_pkts;
1379 di.sco_mtu = 0;
1380 di.sco_pkts = 0;
1381 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 di.link_policy = hdev->link_policy;
1383 di.link_mode = hdev->link_mode;
1384
1385 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1386 memcpy(&di.features, &hdev->features, sizeof(di.features));
1387
1388 if (copy_to_user(arg, &di, sizeof(di)))
1389 err = -EFAULT;
1390
1391 hci_dev_put(hdev);
1392
1393 return err;
1394}
1395
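/* Editorial sketch (not part of the original file): how userspace might
 * exercise the ioctl handler above. Assumes the usual raw HCI socket and
 * the HCIGETDEVINFO request number from the exported hci.h header; error
 * handling is elided and the snippet is compiled out.
 */
#if 0 /* example only */
	struct hci_dev_info di = { .dev_id = 0 };
	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (sk >= 0 && ioctl(sk, HCIGETDEVINFO, (void *) &di) == 0)
		printf("hci%u acl_mtu %u\n", di.dev_id, di.acl_mtu);
#endif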
1396/* ---- Interface to HCI drivers ---- */
1397
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001398static int hci_rfkill_set_block(void *data, bool blocked)
1399{
1400 struct hci_dev *hdev = data;
1401
1402 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1403
1404 if (!blocked)
1405 return 0;
1406
1407 hci_dev_do_close(hdev);
1408
1409 return 0;
1410}
1411
1412static const struct rfkill_ops hci_rfkill_ops = {
1413 .set_block = hci_rfkill_set_block,
1414};
1415
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001416static void hci_power_on(struct work_struct *work)
1417{
1418 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1419
1420 BT_DBG("%s", hdev->name);
1421
1422 if (hci_dev_open(hdev->id) < 0)
1423 return;
1424
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001425 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg19202572013-01-14 22:33:51 +02001426 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1427 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001428
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001429 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001430 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001431}
1432
1433static void hci_power_off(struct work_struct *work)
1434{
Johan Hedberg32435532011-11-07 22:16:04 +02001435 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001436 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001437
1438 BT_DBG("%s", hdev->name);
1439
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001440 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001441}
1442
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001443static void hci_discov_off(struct work_struct *work)
1444{
1445 struct hci_dev *hdev;
1446 u8 scan = SCAN_PAGE;
1447
1448 hdev = container_of(work, struct hci_dev, discov_off.work);
1449
1450 BT_DBG("%s", hdev->name);
1451
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001452 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001453
1454 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1455
1456 hdev->discov_timeout = 0;
1457
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001458 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001459}
1460
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001461int hci_uuids_clear(struct hci_dev *hdev)
1462{
Johan Hedberg48210022013-01-27 00:31:28 +02001463 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001464
Johan Hedberg48210022013-01-27 00:31:28 +02001465 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1466 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001467 kfree(uuid);
1468 }
1469
1470 return 0;
1471}
1472
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001473int hci_link_keys_clear(struct hci_dev *hdev)
1474{
1475 struct list_head *p, *n;
1476
1477 list_for_each_safe(p, n, &hdev->link_keys) {
1478 struct link_key *key;
1479
1480 key = list_entry(p, struct link_key, list);
1481
1482 list_del(p);
1483 kfree(key);
1484 }
1485
1486 return 0;
1487}
1488
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001489int hci_smp_ltks_clear(struct hci_dev *hdev)
1490{
1491 struct smp_ltk *k, *tmp;
1492
1493 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1494 list_del(&k->list);
1495 kfree(k);
1496 }
1497
1498 return 0;
1499}
1500
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001501struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1502{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001503 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001504
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001505 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001506 if (bacmp(bdaddr, &k->bdaddr) == 0)
1507 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001508
1509 return NULL;
1510}
1511
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301512static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001513 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001514{
1515 /* Legacy key */
1516 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301517 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001518
1519 /* Debug keys are insecure so don't store them persistently */
1520 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301521 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001522
1523 /* Changed combination key and there's no previous one */
1524 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301525 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001526
1527 /* Security mode 3 case */
1528 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301529 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001530
1531 /* Neither local nor remote side had no-bonding as requirement */
1532 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301533 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001534
1535 /* Local side had dedicated bonding as requirement */
1536 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301537 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001538
1539 /* Remote side had dedicated bonding as requirement */
1540 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301541 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001542
1543 /* If none of the above criteria match, then don't store the key
1544 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301545 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001546}
1547
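/* Worked example (editorial, assuming the usual auth_type encoding of
 * 0x00/0x01 no bonding, 0x02/0x03 dedicated bonding, 0x04/0x05 general
 * bonding): an unauthenticated combination key from a pairing in which
 * both sides asked for general bonding passes the "> 0x01" test above
 * and is stored; the same key type negotiated with no-bonding on both
 * ends falls through every test and is discarded once the link drops.
 */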
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001548struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001549{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001550 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001551
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001552 list_for_each_entry(k, &hdev->long_term_keys, list) {
1553 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001554 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001555 continue;
1556
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001557 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001558 }
1559
1560 return NULL;
1561}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001562
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001563struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001564 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001565{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001566 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001567
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001568 list_for_each_entry(k, &hdev->long_term_keys, list)
1569 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001570 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001571 return k;
1572
1573 return NULL;
1574}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001575
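/* Editorial note: the two lookups above cover both directions of LE key
 * use -- hci_find_ltk() matches the EDiv/Rand pair carried in an LE Long
 * Term Key Request event, while hci_find_ltk_by_addr() serves the case
 * where this host initiates encryption for a known peer. A sketch of the
 * event side (field names are illustrative, not taken from this file):
 */
#if 0 /* example only */
	struct smp_ltk *ltk = hci_find_ltk(hdev, ev->ediv, ev->random);

	if (!ltk)
		/* respond with an LE LTK negative reply */;
#endif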
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001576int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001577 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001578{
1579 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301580 u8 old_key_type;
1581 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001582
1583 old_key = hci_find_link_key(hdev, bdaddr);
1584 if (old_key) {
1585 old_key_type = old_key->type;
1586 key = old_key;
1587 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001588 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001589 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1590 if (!key)
1591 return -ENOMEM;
1592 list_add(&key->list, &hdev->link_keys);
1593 }
1594
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001595 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001596
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001597 /* Some buggy controller combinations generate a changed
1598 * combination key for legacy pairing even when there's no
1599 * previous key */
1600 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001601 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001602 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001603 if (conn)
1604 conn->key_type = type;
1605 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001606
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001607 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001608 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001609 key->pin_len = pin_len;
1610
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001611 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001612 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001613 else
1614 key->type = type;
1615
Johan Hedberg4df378a2011-04-28 11:29:03 -07001616 if (!new_key)
1617 return 0;
1618
1619 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1620
Johan Hedberg744cf192011-11-08 20:40:14 +02001621 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001622
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301623 if (conn)
1624 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001625
1626 return 0;
1627}
1628
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001629int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001630		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001631		__le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001632{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001633 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001634
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001635 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1636 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001637
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001638 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1639 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001640 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001641 else {
1642 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001643 if (!key)
1644 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001645 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001646 }
1647
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001648 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001649 key->bdaddr_type = addr_type;
1650 memcpy(key->val, tk, sizeof(key->val));
1651 key->authenticated = authenticated;
1652 key->ediv = ediv;
1653 key->enc_size = enc_size;
1654 key->type = type;
1655 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001656
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001657 if (!new_key)
1658 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001659
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001660 if (type & HCI_SMP_LTK)
1661 mgmt_new_ltk(hdev, key, 1);
1662
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001663 return 0;
1664}
1665
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001666int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1667{
1668 struct link_key *key;
1669
1670 key = hci_find_link_key(hdev, bdaddr);
1671 if (!key)
1672 return -ENOENT;
1673
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001674 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001675
1676 list_del(&key->list);
1677 kfree(key);
1678
1679 return 0;
1680}
1681
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001682int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1683{
1684 struct smp_ltk *k, *tmp;
1685
1686 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1687 if (bacmp(bdaddr, &k->bdaddr))
1688 continue;
1689
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001690 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001691
1692 list_del(&k->list);
1693 kfree(k);
1694 }
1695
1696 return 0;
1697}
1698
Ville Tervo6bd32322011-02-16 16:32:41 +02001699/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001700static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001701{
1702 struct hci_dev *hdev = (void *) arg;
1703
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001704 if (hdev->sent_cmd) {
1705 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1706 u16 opcode = __le16_to_cpu(sent->opcode);
1707
1708 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1709 } else {
1710 BT_ERR("%s command tx timeout", hdev->name);
1711 }
1712
Ville Tervo6bd32322011-02-16 16:32:41 +02001713 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001714 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001715}
1716
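/* Editorial note: this timer is armed each time hci_cmd_work() hands a
 * command to the driver and deleted when the matching Command Complete
 * or Command Status event arrives; on expiry the command credit is
 * restored above so a lost event cannot stall the command queue forever.
 */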
Szymon Janc2763eda2011-03-22 13:12:22 +01001717struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001718 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001719{
1720 struct oob_data *data;
1721
1722 list_for_each_entry(data, &hdev->remote_oob_data, list)
1723 if (bacmp(bdaddr, &data->bdaddr) == 0)
1724 return data;
1725
1726 return NULL;
1727}
1728
1729int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1730{
1731 struct oob_data *data;
1732
1733 data = hci_find_remote_oob_data(hdev, bdaddr);
1734 if (!data)
1735 return -ENOENT;
1736
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001737 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001738
1739 list_del(&data->list);
1740 kfree(data);
1741
1742 return 0;
1743}
1744
1745int hci_remote_oob_data_clear(struct hci_dev *hdev)
1746{
1747 struct oob_data *data, *n;
1748
1749 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1750 list_del(&data->list);
1751 kfree(data);
1752 }
1753
1754 return 0;
1755}
1756
1757int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001758 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001759{
1760 struct oob_data *data;
1761
1762 data = hci_find_remote_oob_data(hdev, bdaddr);
1763
1764 if (!data) {
1765 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1766 if (!data)
1767 return -ENOMEM;
1768
1769 bacpy(&data->bdaddr, bdaddr);
1770 list_add(&data->list, &hdev->remote_oob_data);
1771 }
1772
1773 memcpy(data->hash, hash, sizeof(data->hash));
1774 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1775
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001776 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001777
1778 return 0;
1779}
1780
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001781struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001782{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001783 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001784
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001785 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001786 if (bacmp(bdaddr, &b->bdaddr) == 0)
1787 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001788
1789 return NULL;
1790}
1791
1792int hci_blacklist_clear(struct hci_dev *hdev)
1793{
1794 struct list_head *p, *n;
1795
1796 list_for_each_safe(p, n, &hdev->blacklist) {
1797 struct bdaddr_list *b;
1798
1799 b = list_entry(p, struct bdaddr_list, list);
1800
1801 list_del(p);
1802 kfree(b);
1803 }
1804
1805 return 0;
1806}
1807
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001808int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001809{
1810 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001811
1812 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1813 return -EBADF;
1814
Antti Julku5e762442011-08-25 16:48:02 +03001815 if (hci_blacklist_lookup(hdev, bdaddr))
1816 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001817
1818 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001819 if (!entry)
1820 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001821
1822 bacpy(&entry->bdaddr, bdaddr);
1823
1824 list_add(&entry->list, &hdev->blacklist);
1825
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001826 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001827}
1828
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001829int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001830{
1831 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001832
Szymon Janc1ec918c2011-11-16 09:32:21 +01001833 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001834 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001835
1836 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001837 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001838 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001839
1840 list_del(&entry->list);
1841 kfree(entry);
1842
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001843 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001844}
1845
Johan Hedberg42c6b122013-03-05 20:37:49 +02001846static void le_scan_param_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001847{
1848 struct le_scan_params *param = (struct le_scan_params *) opt;
1849 struct hci_cp_le_set_scan_param cp;
1850
1851 memset(&cp, 0, sizeof(cp));
1852 cp.type = param->type;
1853 cp.interval = cpu_to_le16(param->interval);
1854 cp.window = cpu_to_le16(param->window);
1855
Johan Hedberg42c6b122013-03-05 20:37:49 +02001856 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001857}
1858
Johan Hedberg42c6b122013-03-05 20:37:49 +02001859static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001860{
1861 struct hci_cp_le_set_scan_enable cp;
1862
1863 memset(&cp, 0, sizeof(cp));
1864 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001865 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001866
Johan Hedberg42c6b122013-03-05 20:37:49 +02001867 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001868}
1869
1870static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001871 u16 window, int timeout)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001872{
1873 long timeo = msecs_to_jiffies(3000);
1874 struct le_scan_params param;
1875 int err;
1876
1877 BT_DBG("%s", hdev->name);
1878
1879 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1880 return -EINPROGRESS;
1881
1882 param.type = type;
1883 param.interval = interval;
1884 param.window = window;
1885
1886 hci_req_lock(hdev);
1887
Johan Hedberg01178cd2013-03-05 20:37:41 +02001888 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1889 timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001890 if (!err)
Johan Hedberg01178cd2013-03-05 20:37:41 +02001891 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001892
1893 hci_req_unlock(hdev);
1894
1895 if (err < 0)
1896 return err;
1897
Johan Hedberg46818ed2013-01-14 22:33:52 +02001898 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1899 msecs_to_jiffies(timeout));
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001900
1901 return 0;
1902}
1903
Andre Guedes7dbfac12012-03-15 16:52:07 -03001904int hci_cancel_le_scan(struct hci_dev *hdev)
1905{
1906 BT_DBG("%s", hdev->name);
1907
1908 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1909 return -EALREADY;
1910
1911 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1912 struct hci_cp_le_set_scan_enable cp;
1913
1914 /* Send HCI command to disable LE Scan */
1915 memset(&cp, 0, sizeof(cp));
1916 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1917 }
1918
1919 return 0;
1920}
1921
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001922static void le_scan_disable_work(struct work_struct *work)
1923{
1924 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001925 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001926 struct hci_cp_le_set_scan_enable cp;
1927
1928 BT_DBG("%s", hdev->name);
1929
1930 memset(&cp, 0, sizeof(cp));
1931
1932 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1933}
1934
Andre Guedes28b75a82012-02-03 17:48:00 -03001935static void le_scan_work(struct work_struct *work)
1936{
1937 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1938 struct le_scan_params *param = &hdev->le_scan_params;
1939
1940 BT_DBG("%s", hdev->name);
1941
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001942 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1943 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001944}
1945
1946int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001947 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001948{
1949 struct le_scan_params *param = &hdev->le_scan_params;
1950
1951 BT_DBG("%s", hdev->name);
1952
Johan Hedbergf15504782012-10-24 21:12:03 +03001953 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1954 return -ENOTSUPP;
1955
Andre Guedes28b75a82012-02-03 17:48:00 -03001956 if (work_busy(&hdev->le_scan))
1957 return -EINPROGRESS;
1958
1959 param->type = type;
1960 param->interval = interval;
1961 param->window = window;
1962 param->timeout = timeout;
1963
1964 queue_work(system_long_wq, &hdev->le_scan);
1965
1966 return 0;
1967}
1968
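/* Editorial sketch: the scan path above follows the Core specification
 * flow -- first set the scan parameters, then enable scanning -- with
 * the disable scheduled as delayed work. Interval and window are in
 * units of 0.625 ms, so scanning continuously (window == interval,
 * 18 * 0.625 = 11.25 ms here) for ten seconds might look like this
 * (values are illustrative, not from this file):
 */
#if 0 /* example only */
	err = hci_le_scan(hdev, 0x01 /* active scan */, 0x12, 0x12, 10000);
#endif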
David Herrmann9be0dab2012-04-22 14:39:57 +02001969/* Alloc HCI device */
1970struct hci_dev *hci_alloc_dev(void)
1971{
1972 struct hci_dev *hdev;
1973
1974 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1975 if (!hdev)
1976 return NULL;
1977
David Herrmannb1b813d2012-04-22 14:39:58 +02001978 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1979 hdev->esco_type = (ESCO_HV1);
1980 hdev->link_mode = (HCI_LM_ACCEPT);
1981 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01001982 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1983 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02001984
David Herrmannb1b813d2012-04-22 14:39:58 +02001985 hdev->sniff_max_interval = 800;
1986 hdev->sniff_min_interval = 80;
1987
1988 mutex_init(&hdev->lock);
1989 mutex_init(&hdev->req_lock);
1990
1991 INIT_LIST_HEAD(&hdev->mgmt_pending);
1992 INIT_LIST_HEAD(&hdev->blacklist);
1993 INIT_LIST_HEAD(&hdev->uuids);
1994 INIT_LIST_HEAD(&hdev->link_keys);
1995 INIT_LIST_HEAD(&hdev->long_term_keys);
1996 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03001997 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02001998
1999 INIT_WORK(&hdev->rx_work, hci_rx_work);
2000 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2001 INIT_WORK(&hdev->tx_work, hci_tx_work);
2002 INIT_WORK(&hdev->power_on, hci_power_on);
2003 INIT_WORK(&hdev->le_scan, le_scan_work);
2004
David Herrmannb1b813d2012-04-22 14:39:58 +02002005 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2006 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2007 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2008
David Herrmann9be0dab2012-04-22 14:39:57 +02002009 skb_queue_head_init(&hdev->driver_init);
David Herrmannb1b813d2012-04-22 14:39:58 +02002010 skb_queue_head_init(&hdev->rx_q);
2011 skb_queue_head_init(&hdev->cmd_q);
2012 skb_queue_head_init(&hdev->raw_q);
2013
2014 init_waitqueue_head(&hdev->req_wait_q);
2015
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002016 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002017
David Herrmannb1b813d2012-04-22 14:39:58 +02002018 hci_init_sysfs(hdev);
2019 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002020
2021 return hdev;
2022}
2023EXPORT_SYMBOL(hci_alloc_dev);
2024
2025/* Free HCI device */
2026void hci_free_dev(struct hci_dev *hdev)
2027{
2028 skb_queue_purge(&hdev->driver_init);
2029
2030 /* will free via device release */
2031 put_device(&hdev->dev);
2032}
2033EXPORT_SYMBOL(hci_free_dev);
2034
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035/* Register HCI device */
2036int hci_register_dev(struct hci_dev *hdev)
2037{
David Herrmannb1b813d2012-04-22 14:39:58 +02002038 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039
David Herrmann010666a2012-01-07 15:47:07 +01002040 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 return -EINVAL;
2042
Mat Martineau08add512011-11-02 16:18:36 -07002043 /* Do not allow HCI_AMP devices to register at index 0,
2044 * so the index can be used as the AMP controller ID.
2045 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002046 switch (hdev->dev_type) {
2047 case HCI_BREDR:
2048 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2049 break;
2050 case HCI_AMP:
2051 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2052 break;
2053 default:
2054 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002056
Sasha Levin3df92b32012-05-27 22:36:56 +02002057 if (id < 0)
2058 return id;
2059
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 sprintf(hdev->name, "hci%d", id);
2061 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002062
2063 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2064
Sasha Levin3df92b32012-05-27 22:36:56 +02002065 write_lock(&hci_dev_list_lock);
2066 list_add(&hdev->list, &hci_dev_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002067 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02002069 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002070 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02002071 if (!hdev->workqueue) {
2072 error = -ENOMEM;
2073 goto err;
2074 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002075
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002076 hdev->req_workqueue = alloc_workqueue(hdev->name,
2077 WQ_HIGHPRI | WQ_UNBOUND |
2078 WQ_MEM_RECLAIM, 1);
2079 if (!hdev->req_workqueue) {
2080 destroy_workqueue(hdev->workqueue);
2081 error = -ENOMEM;
2082 goto err;
2083 }
2084
David Herrmann33ca9542011-10-08 14:58:49 +02002085 error = hci_add_sysfs(hdev);
2086 if (error < 0)
2087 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002089 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002090 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2091 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002092 if (hdev->rfkill) {
2093 if (rfkill_register(hdev->rfkill) < 0) {
2094 rfkill_destroy(hdev->rfkill);
2095 hdev->rfkill = NULL;
2096 }
2097 }
2098
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002099 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002100
2101 if (hdev->dev_type != HCI_AMP)
2102 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2103
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002105 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106
Johan Hedberg19202572013-01-14 22:33:51 +02002107 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002108
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002110
David Herrmann33ca9542011-10-08 14:58:49 +02002111err_wqueue:
2112 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002113 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002114err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002115 ida_simple_remove(&hci_index_ida, hdev->id);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002116 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002117 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002118 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002119
David Herrmann33ca9542011-10-08 14:58:49 +02002120 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121}
2122EXPORT_SYMBOL(hci_register_dev);
2123
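/* Editorial sketch of the driver contract implied above: a transport
 * driver allocates an hdev, fills in at least the mandatory open/close
 * hooks (plus the bus type and the send callback used later in this
 * file) and then registers. All "foo_*" names are hypothetical.
 */
#if 0 /* example only */
static int foo_probe(struct foo_device *dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = foo_open;
	hdev->close = foo_close;
	hdev->send  = foo_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}
#endif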
2124/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002125void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126{
Sasha Levin3df92b32012-05-27 22:36:56 +02002127 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002128
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002129 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
Johan Hovold94324962012-03-15 14:48:41 +01002131 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2132
Sasha Levin3df92b32012-05-27 22:36:56 +02002133 id = hdev->id;
2134
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002135 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002137 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138
2139 hci_dev_do_close(hdev);
2140
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302141 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002142 kfree_skb(hdev->reassembly[i]);
2143
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002144 cancel_work_sync(&hdev->power_on);
2145
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002146 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002147 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002148 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002149 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002150 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002151 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002152
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002153 /* mgmt_index_removed should take care of emptying the
2154 * pending list */
2155 BUG_ON(!list_empty(&hdev->mgmt_pending));
2156
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 hci_notify(hdev, HCI_DEV_UNREG);
2158
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002159 if (hdev->rfkill) {
2160 rfkill_unregister(hdev->rfkill);
2161 rfkill_destroy(hdev->rfkill);
2162 }
2163
David Herrmannce242972011-10-08 14:58:48 +02002164 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002165
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002166 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002167 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002168
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002169 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002170 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002171 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002172 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002173 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002174 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002175 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002176
David Herrmanndc946bd2012-01-07 15:47:24 +01002177 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002178
2179 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180}
2181EXPORT_SYMBOL(hci_unregister_dev);
2182
2183/* Suspend HCI device */
2184int hci_suspend_dev(struct hci_dev *hdev)
2185{
2186 hci_notify(hdev, HCI_DEV_SUSPEND);
2187 return 0;
2188}
2189EXPORT_SYMBOL(hci_suspend_dev);
2190
2191/* Resume HCI device */
2192int hci_resume_dev(struct hci_dev *hdev)
2193{
2194 hci_notify(hdev, HCI_DEV_RESUME);
2195 return 0;
2196}
2197EXPORT_SYMBOL(hci_resume_dev);
2198
Marcel Holtmann76bca882009-11-18 00:40:39 +01002199/* Receive frame from HCI drivers */
2200int hci_recv_frame(struct sk_buff *skb)
2201{
2202 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2203 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002204 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002205 kfree_skb(skb);
2206 return -ENXIO;
2207 }
2208
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002209 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002210 bt_cb(skb)->incoming = 1;
2211
2212 /* Time stamp */
2213 __net_timestamp(skb);
2214
Marcel Holtmann76bca882009-11-18 00:40:39 +01002215 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002216 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002217
Marcel Holtmann76bca882009-11-18 00:40:39 +01002218 return 0;
2219}
2220EXPORT_SYMBOL(hci_recv_frame);
2221
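/* Editorial sketch: what a driver's receive path is expected to set up
 * before calling hci_recv_frame(), matching the checks above. The hdev
 * pointer travels in skb->dev in this kernel version; "buf" and "count"
 * are hypothetical.
 */
#if 0 /* example only */
	struct sk_buff *skb = bt_skb_alloc(count, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, count), buf, count);
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_recv_frame(skb);
#endif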
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302222static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002223 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302224{
2225 int len = 0;
2226 int hlen = 0;
2227 int remain = count;
2228 struct sk_buff *skb;
2229 struct bt_skb_cb *scb;
2230
2231 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002232 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302233 return -EILSEQ;
2234
2235 skb = hdev->reassembly[index];
2236
2237 if (!skb) {
2238 switch (type) {
2239 case HCI_ACLDATA_PKT:
2240 len = HCI_MAX_FRAME_SIZE;
2241 hlen = HCI_ACL_HDR_SIZE;
2242 break;
2243 case HCI_EVENT_PKT:
2244 len = HCI_MAX_EVENT_SIZE;
2245 hlen = HCI_EVENT_HDR_SIZE;
2246 break;
2247 case HCI_SCODATA_PKT:
2248 len = HCI_MAX_SCO_SIZE;
2249 hlen = HCI_SCO_HDR_SIZE;
2250 break;
2251 }
2252
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002253 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302254 if (!skb)
2255 return -ENOMEM;
2256
2257 scb = (void *) skb->cb;
2258 scb->expect = hlen;
2259 scb->pkt_type = type;
2260
2261 skb->dev = (void *) hdev;
2262 hdev->reassembly[index] = skb;
2263 }
2264
2265 while (count) {
2266 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002267 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302268
2269 memcpy(skb_put(skb, len), data, len);
2270
2271 count -= len;
2272 data += len;
2273 scb->expect -= len;
2274 remain = count;
2275
2276 switch (type) {
2277 case HCI_EVENT_PKT:
2278 if (skb->len == HCI_EVENT_HDR_SIZE) {
2279 struct hci_event_hdr *h = hci_event_hdr(skb);
2280 scb->expect = h->plen;
2281
2282 if (skb_tailroom(skb) < scb->expect) {
2283 kfree_skb(skb);
2284 hdev->reassembly[index] = NULL;
2285 return -ENOMEM;
2286 }
2287 }
2288 break;
2289
2290 case HCI_ACLDATA_PKT:
2291 if (skb->len == HCI_ACL_HDR_SIZE) {
2292 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2293 scb->expect = __le16_to_cpu(h->dlen);
2294
2295 if (skb_tailroom(skb) < scb->expect) {
2296 kfree_skb(skb);
2297 hdev->reassembly[index] = NULL;
2298 return -ENOMEM;
2299 }
2300 }
2301 break;
2302
2303 case HCI_SCODATA_PKT:
2304 if (skb->len == HCI_SCO_HDR_SIZE) {
2305 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2306 scb->expect = h->dlen;
2307
2308 if (skb_tailroom(skb) < scb->expect) {
2309 kfree_skb(skb);
2310 hdev->reassembly[index] = NULL;
2311 return -ENOMEM;
2312 }
2313 }
2314 break;
2315 }
2316
2317 if (scb->expect == 0) {
2318 /* Complete frame */
2319
2320 bt_cb(skb)->pkt_type = type;
2321 hci_recv_frame(skb);
2322
2323 hdev->reassembly[index] = NULL;
2324 return remain;
2325 }
2326 }
2327
2328 return remain;
2329}
2330
Marcel Holtmannef222012007-07-11 06:42:04 +02002331int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2332{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302333 int rem = 0;
2334
Marcel Holtmannef222012007-07-11 06:42:04 +02002335 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2336 return -EILSEQ;
2337
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002338 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002339 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302340 if (rem < 0)
2341 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002342
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302343 data += (count - rem);
2344 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002345 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002346
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302347 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002348}
2349EXPORT_SYMBOL(hci_recv_fragment);
2350
Suraj Sumangala99811512010-07-14 13:02:19 +05302351#define STREAM_REASSEMBLY 0
2352
2353int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2354{
2355 int type;
2356 int rem = 0;
2357
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002358 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302359 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2360
2361 if (!skb) {
2362 struct { char type; } *pkt;
2363
2364 /* Start of the frame */
2365 pkt = data;
2366 type = pkt->type;
2367
2368 data++;
2369 count--;
2370 } else
2371 type = bt_cb(skb)->pkt_type;
2372
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002373 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002374 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302375 if (rem < 0)
2376 return rem;
2377
2378 data += (count - rem);
2379 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002380 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302381
2382 return rem;
2383}
2384EXPORT_SYMBOL(hci_recv_stream_fragment);
2385
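/* Editorial note: hci_recv_fragment() suits transports that already know
 * each chunk's packet type, while hci_recv_stream_fragment() additionally
 * recovers the type from the leading indicator byte of a raw byte stream
 * such as a UART. A line-discipline receive hook might simply do
 * (hypothetical names):
 */
#if 0 /* example only */
	if (hci_recv_stream_fragment(hdev, data, count) < 0)
		BT_ERR("Frame reassembly failed");
#endif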
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386/* ---- Interface to upper protocols ---- */
2387
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388int hci_register_cb(struct hci_cb *cb)
2389{
2390 BT_DBG("%p name %s", cb, cb->name);
2391
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002392 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002394 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395
2396 return 0;
2397}
2398EXPORT_SYMBOL(hci_register_cb);
2399
2400int hci_unregister_cb(struct hci_cb *cb)
2401{
2402 BT_DBG("%p name %s", cb, cb->name);
2403
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002404 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002406 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
2408 return 0;
2409}
2410EXPORT_SYMBOL(hci_unregister_cb);
2411
2412static int hci_send_frame(struct sk_buff *skb)
2413{
2414 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2415
2416 if (!hdev) {
2417 kfree_skb(skb);
2418 return -ENODEV;
2419 }
2420
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002421 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002423 /* Time stamp */
2424 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002426 /* Send copy to monitor */
2427 hci_send_to_monitor(hdev, skb);
2428
2429 if (atomic_read(&hdev->promisc)) {
2430 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002431 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 }
2433
2434 /* Get rid of skb owner, prior to sending to the driver. */
2435 skb_orphan(skb);
2436
2437 return hdev->send(skb);
2438}
2439
Johan Hedberg3119ae92013-03-05 20:37:44 +02002440void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2441{
2442 skb_queue_head_init(&req->cmd_q);
2443 req->hdev = hdev;
2444}
2445
2446int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2447{
2448 struct hci_dev *hdev = req->hdev;
2449 struct sk_buff *skb;
2450 unsigned long flags;
2451
2452 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2453
2454 /* Do not allow empty requests */
2455 if (skb_queue_empty(&req->cmd_q))
2456 return -EINVAL;
2457
2458 skb = skb_peek_tail(&req->cmd_q);
2459 bt_cb(skb)->req.complete = complete;
2460
2461 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2462 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2463 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2464
2465 queue_work(hdev->workqueue, &hdev->cmd_work);
2466
2467 return 0;
2468}
2469
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002470static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2471 u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472{
2473 int len = HCI_COMMAND_HDR_SIZE + plen;
2474 struct hci_command_hdr *hdr;
2475 struct sk_buff *skb;
2476
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002478 if (!skb)
2479 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480
2481 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002482 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 hdr->plen = plen;
2484
2485 if (plen)
2486 memcpy(skb_put(skb, plen), param, plen);
2487
2488 BT_DBG("skb len %d", skb->len);
2489
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002490 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002492
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002493 return skb;
2494}
2495
2496/* Send HCI command */
2497int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2498{
2499 struct sk_buff *skb;
2500
2501 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2502
2503 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2504 if (!skb) {
2505 BT_ERR("%s no memory for command", hdev->name);
2506 return -ENOMEM;
2507 }
2508
Johan Hedberg11714b32013-03-05 20:37:47 +02002509	/* Stand-alone HCI commands must be flagged as
2510 * single-command requests.
2511 */
2512 bt_cb(skb)->req.start = true;
2513
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002515 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516
2517 return 0;
2518}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519
Johan Hedberg71c76a12013-03-05 20:37:46 +02002520/* Queue a command to an asynchronous HCI request */
2521int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2522{
2523 struct hci_dev *hdev = req->hdev;
2524 struct sk_buff *skb;
2525
2526 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2527
2528 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2529 if (!skb) {
2530 BT_ERR("%s no memory for command", hdev->name);
2531 return -ENOMEM;
2532 }
2533
2534 if (skb_queue_empty(&req->cmd_q))
2535 bt_cb(skb)->req.start = true;
2536
2537 skb_queue_tail(&req->cmd_q, skb);
2538
2539 return 0;
2540}
2541
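/* Editorial sketch of the asynchronous request API above: queue one or
 * more commands on a stack-allocated hci_request, then submit them as a
 * unit with a single completion callback ("foo_complete" is
 * hypothetical).
 */
#if 0 /* example only */
	struct hci_request req;
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
	hci_req_run(&req, foo_complete);
#endif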
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002543void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544{
2545 struct hci_command_hdr *hdr;
2546
2547 if (!hdev->sent_cmd)
2548 return NULL;
2549
2550 hdr = (void *) hdev->sent_cmd->data;
2551
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002552 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 return NULL;
2554
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002555 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556
2557 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2558}
2559
2560/* Send ACL data */
2561static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2562{
2563 struct hci_acl_hdr *hdr;
2564 int len = skb->len;
2565
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002566 skb_push(skb, HCI_ACL_HDR_SIZE);
2567 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002568 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002569 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2570 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571}
2572
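/* Editorial note: hci_handle_pack() above keeps the low 12 bits for the
 * connection handle and folds the packet-boundary/broadcast flags into
 * the top 4 bits -- roughly (handle & 0x0fff) | (flags << 12) -- so
 * handle 0x0001 sent with ACL_START (0x02) goes out as 0x2001,
 * little-endian on the wire.
 */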
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002573static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002574 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002576 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 struct hci_dev *hdev = conn->hdev;
2578 struct sk_buff *list;
2579
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002580 skb->len = skb_headlen(skb);
2581 skb->data_len = 0;
2582
2583 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002584
2585 switch (hdev->dev_type) {
2586 case HCI_BREDR:
2587 hci_add_acl_hdr(skb, conn->handle, flags);
2588 break;
2589 case HCI_AMP:
2590 hci_add_acl_hdr(skb, chan->handle, flags);
2591 break;
2592 default:
2593 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2594 return;
2595 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002596
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002597 list = skb_shinfo(skb)->frag_list;
2598 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 /* Non fragmented */
2600 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2601
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002602 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 } else {
2604 /* Fragmented */
2605 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2606
2607 skb_shinfo(skb)->frag_list = NULL;
2608
2609 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002610 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002612 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002613
2614 flags &= ~ACL_START;
2615 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 do {
2617 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002618
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002620 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002621 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622
2623 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2624
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002625 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 } while (list);
2627
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002628 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002630}
2631
2632void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2633{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002634 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002635
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002636 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002637
2638 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002639
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002640 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002642 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644
2645/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002646void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647{
2648 struct hci_dev *hdev = conn->hdev;
2649 struct hci_sco_hdr hdr;
2650
2651 BT_DBG("%s len %d", hdev->name, skb->len);
2652
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002653 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 hdr.dlen = skb->len;
2655
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002656 skb_push(skb, HCI_SCO_HDR_SIZE);
2657 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002658 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659
2660 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002661 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002662
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002664 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666
2667/* ---- HCI TX task (outgoing data) ---- */
2668
2669/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002670static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2671 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672{
2673 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002674 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002675 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002677 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002679
2680 rcu_read_lock();
2681
2682 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002683 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002685
2686 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2687 continue;
2688
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 num++;
2690
2691 if (c->sent < min) {
2692 min = c->sent;
2693 conn = c;
2694 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002695
2696 if (hci_conn_num(hdev, type) == num)
2697 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 }
2699
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002700 rcu_read_unlock();
2701
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002703 int cnt, q;
2704
2705 switch (conn->type) {
2706 case ACL_LINK:
2707 cnt = hdev->acl_cnt;
2708 break;
2709 case SCO_LINK:
2710 case ESCO_LINK:
2711 cnt = hdev->sco_cnt;
2712 break;
2713 case LE_LINK:
2714 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2715 break;
2716 default:
2717 cnt = 0;
2718 BT_ERR("Unknown link type");
2719 }
2720
2721 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 *quote = q ? q : 1;
2723 } else
2724 *quote = 0;
2725
2726 BT_DBG("conn %p quote %d", conn, *quote);
2727 return conn;
2728}
2729
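/* Worked example (editorial): with hdev->acl_cnt == 8 free ACL buffers
 * and num == 3 ACL connections holding queued data, the quota above is
 * q = 8 / 3 = 2 packets for the least-recently-served connection; a zero
 * quotient is rounded up to 1 so that progress is always possible.
 */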
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002730static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731{
2732 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002733 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734
Ville Tervobae1f5d92011-02-10 22:38:53 -03002735 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002737 rcu_read_lock();
2738
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002740 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002741 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002742 BT_ERR("%s killing stalled connection %pMR",
2743 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03002744 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 }
2746 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002747
2748 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749}
2750
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

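/* Starvation avoidance: after a tx round, promote the head packet of
 * every channel that has queued data but sent nothing this round to
 * HCI_PRIO_MAX - 1, and clear the sent counters of the channels that
 * were serviced.
 */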
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

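/* Block-based flow control accounts in fixed-size buffer blocks rather
 * than whole packets.
 */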
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

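/* Detect a stalled ACL link: no buffer credits left and nothing
 * acknowledged since the last transmission for HCI_ACL_TX_TIMEOUT.
 */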
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

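/* Packet-based ACL scheduler: repeatedly pick the most urgent channel
 * and drain it up to its quota, stopping early if the priority at the
 * head of its queue drops below the priority the run started with.
 */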
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

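/* Block-based ACL scheduler: same policy as the packet-based variant,
 * but quota and controller budget are accounted in buffer blocks, and
 * on an AMP controller the data links are of type AMP_LINK.
 */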
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

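/* Dispatch to the scheduler matching the controller's flow control
 * mode: packet-based or block-based.
 */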
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

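/* LE scheduler: LE links either have their own buffer pool
 * (hdev->le_pkts/le_cnt) or, on controllers without one, share the
 * ACL pool; the same priority and fairness rules as ACL apply.
 */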
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

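/* Tx work: run the per-link-type schedulers, then send any queued raw
 * packets straight to the driver.
 */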
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

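/* A request is a sequence of commands queued together; the first
 * command of the next request carries req.start. An empty command
 * queue, or a queue whose head starts a new request, means the
 * current request has no commands left.
 */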
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

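/* Re-queue a clone of the last sent command (except HCI_OP_RESET,
 * which must not be repeated) and kick the command work.
 */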
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

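/* Called on Command Complete: if the command failed, or was the last
 * one of its request, locate the request's completion callback (on
 * hdev->sent_cmd or further down cmd_q) and invoke it with the final
 * status.
 */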
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

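/* Called on Command Status: a failure ends the request immediately;
 * a success only matters here if it was the request's last command.
 */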
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}

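/* Rx work: pass every received packet to the monitor and, in
 * promiscuous mode, to the HCI sockets, then demultiplex events, ACL
 * and SCO data to their handlers.
 */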
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

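/* Command work: send the next queued command if the controller has a
 * free command slot, keep a clone in hdev->sent_cmd for matching the
 * completion event, and arm the command timeout unless a reset is
 * pending.
 */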
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

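/* Start a general inquiry (GIAC) of the given length, flushing the
 * inquiry cache first; -EINPROGRESS if an inquiry is already running.
 */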
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

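/* Map an exported bdaddr type to the LE address type used internally;
 * anything that is not public is treated as a random address.
 */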
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}