/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

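/* A request callback normally just queues commands with hci_req_add() and
 * lets the core wait for completion. A minimal sketch of a caller (the
 * example_req name is hypothetical, not part of this file):
 *
 *	static void example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_req_sync(hdev, example_req, SCAN_PAGE, HCI_INIT_TIMEOUT);
 */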
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}
}

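/* Controller bring-up is staged: hci_init1_req resets the controller and
 * reads basic information, hci_init2_req configures features and the
 * event mask, and hci_init3_req sets up link policy and LE support. Each
 * stage runs as a synchronous request.
 */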
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

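/* The discovery cache keeps every inquiry result on the "all" list;
 * entries whose remote name is still missing are additionally threaded
 * onto the "unknown" list, and entries queued for name resolution onto
 * the "resolve" list.
 */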
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

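/* Re-insert an entry into the resolve list so that entries with a pending
 * name request stay in front and the remaining entries are kept ordered
 * by ascending abs(rssi), i.e. strongest signal first.
 */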
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

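/* Flatten the cache into an array of struct inquiry_info records, at most
 * "num" of them, for copying back to user space from hci_inquiry().
 */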
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

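/* Build the LE advertising data: each AD element is encoded as length,
 * type and payload. create_ad() emits up to three elements (flags, TX
 * power and the possibly shortened local name) into ptr and returns the
 * total number of bytes used.
 */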
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

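/* Power on a controller: after the rfkill and state checks, the driver's
 * open() callback is invoked and, unless the device is marked HCI_RAW,
 * the staged __hci_init() sequence runs with HCI_INIT set.
 */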
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non-BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

1073{
1074 BT_DBG("%s %p", hdev->name, hdev);
1075
Andre Guedes28b75a82012-02-03 17:48:00 -03001076 cancel_work_sync(&hdev->le_scan);
1077
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001078 cancel_delayed_work(&hdev->power_off);
1079
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080 hci_req_cancel(hdev, ENODEV);
1081 hci_req_lock(hdev);
1082
1083 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001084 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 hci_req_unlock(hdev);
1086 return 0;
1087 }
1088
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001089 /* Flush RX and TX works */
1090 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001091 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001093 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001094 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001095 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001096 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001097 }
1098
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001099 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001100 cancel_delayed_work(&hdev->service_cache);
1101
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001102 cancel_delayed_work_sync(&hdev->le_scan_disable);
1103
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001104 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 inquiry_cache_flush(hdev);
1106 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001107 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108
1109 hci_notify(hdev, HCI_DEV_DOWN);
1110
1111 if (hdev->flush)
1112 hdev->flush(hdev);
1113
1114 /* Reset device */
1115 skb_queue_purge(&hdev->cmd_q);
1116 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001117 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001118 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001120 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 clear_bit(HCI_INIT, &hdev->flags);
1122 }
1123
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001124 /* flush cmd work */
1125 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126
1127 /* Drop queues */
1128 skb_queue_purge(&hdev->rx_q);
1129 skb_queue_purge(&hdev->cmd_q);
1130 skb_queue_purge(&hdev->raw_q);
1131
1132 /* Drop last sent command */
1133 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001134 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135 kfree_skb(hdev->sent_cmd);
1136 hdev->sent_cmd = NULL;
1137 }
1138
1139 /* After this point our queues are empty
1140 * and no tasks are scheduled. */
1141 hdev->close(hdev);
1142
Johan Hedberg35b973c2013-03-15 17:06:59 -05001143 /* Clear flags */
1144 hdev->flags = 0;
1145 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1146
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001147 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1148 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001149 hci_dev_lock(hdev);
1150 mgmt_powered(hdev, 0);
1151 hci_dev_unlock(hdev);
1152 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001153
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001154 /* Controller radio is available but is currently powered down */
1155 hdev->amp_status = 0;
1156
Johan Hedberge59fda82012-02-22 18:11:53 +02001157 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001158 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001159
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 hci_req_unlock(hdev);
1161
1162 hci_dev_put(hdev);
1163 return 0;
1164}
1165
1166int hci_dev_close(__u16 dev)
1167{
1168 struct hci_dev *hdev;
1169 int err;
1170
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001171 hdev = hci_dev_get(dev);
1172 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001174
1175 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1176 cancel_delayed_work(&hdev->power_off);
1177
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001179
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180 hci_dev_put(hdev);
1181 return err;
1182}
1183
1184int hci_dev_reset(__u16 dev)
1185{
1186 struct hci_dev *hdev;
1187 int ret = 0;
1188
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001189 hdev = hci_dev_get(dev);
1190 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191 return -ENODEV;
1192
1193 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194
1195 if (!test_bit(HCI_UP, &hdev->flags))
1196 goto done;
1197
1198 /* Drop queues */
1199 skb_queue_purge(&hdev->rx_q);
1200 skb_queue_purge(&hdev->cmd_q);
1201
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001202 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 inquiry_cache_flush(hdev);
1204 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001205 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
1207 if (hdev->flush)
1208 hdev->flush(hdev);
1209
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001210 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001211 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212
1213 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001214 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215
1216done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 hci_req_unlock(hdev);
1218 hci_dev_put(hdev);
1219 return ret;
1220}
1221
1222int hci_dev_reset_stat(__u16 dev)
1223{
1224 struct hci_dev *hdev;
1225 int ret = 0;
1226
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001227 hdev = hci_dev_get(dev);
1228 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 return -ENODEV;
1230
1231 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1232
1233 hci_dev_put(hdev);
1234
1235 return ret;
1236}
1237
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

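	/* HCISETACLMTU and HCISETSCOMTU pack two __u16 values into
	 * dev_opt: the packet count in the first halfword and the MTU
	 * in the second.
	 */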
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

Johan Hedbergab81cbf2010-12-15 13:53:18 +02001429static void hci_power_on(struct work_struct *work)
1430{
1431 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1432
1433 BT_DBG("%s", hdev->name);
1434
1435 if (hci_dev_open(hdev->id) < 0)
1436 return;
1437
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001438 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg19202572013-01-14 22:33:51 +02001439 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1440 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001441
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001442 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001443 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001444}
1445
1446static void hci_power_off(struct work_struct *work)
1447{
Johan Hedberg32435532011-11-07 22:16:04 +02001448 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001449 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001450
1451 BT_DBG("%s", hdev->name);
1452
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001453 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001454}
1455
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001456static void hci_discov_off(struct work_struct *work)
1457{
1458 struct hci_dev *hdev;
1459 u8 scan = SCAN_PAGE;
1460
1461 hdev = container_of(work, struct hci_dev, discov_off.work);
1462
1463 BT_DBG("%s", hdev->name);
1464
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001465 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001466
1467 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1468
1469 hdev->discov_timeout = 0;
1470
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001471 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001472}
1473
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001474int hci_uuids_clear(struct hci_dev *hdev)
1475{
Johan Hedberg48210022013-01-27 00:31:28 +02001476 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001477
Johan Hedberg48210022013-01-27 00:31:28 +02001478 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1479 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001480 kfree(uuid);
1481 }
1482
1483 return 0;
1484}
1485
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001486int hci_link_keys_clear(struct hci_dev *hdev)
1487{
1488 struct list_head *p, *n;
1489
1490 list_for_each_safe(p, n, &hdev->link_keys) {
1491 struct link_key *key;
1492
1493 key = list_entry(p, struct link_key, list);
1494
1495 list_del(p);
1496 kfree(key);
1497 }
1498
1499 return 0;
1500}
1501
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001502int hci_smp_ltks_clear(struct hci_dev *hdev)
1503{
1504 struct smp_ltk *k, *tmp;
1505
1506 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1507 list_del(&k->list);
1508 kfree(k);
1509 }
1510
1511 return 0;
1512}
1513
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001514struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1515{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001516 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001517
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001518 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001519 if (bacmp(bdaddr, &k->bdaddr) == 0)
1520 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001521
1522 return NULL;
1523}
1524
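/* Decide whether a link key should be stored persistently. Legacy
 * keys (type < 0x03) are always kept, debug keys never are, and for
 * Secure Simple Pairing keys the decision depends on the bonding
 * requirements (auth_type/remote_auth) declared by both sides.
 */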
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

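/* Store a new BR/EDR link key, reusing any existing entry for the
 * same bdaddr. When new_key is set, userspace is notified through
 * mgmt_new_link_key() and non-persistent keys are marked to be
 * flushed once the connection drops.
 */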
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

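/* Store an SMP Short Term Key or Long Term Key for an LE connection.
 * Both key types live in hdev->long_term_keys, but only LTKs are
 * reported to userspace via mgmt_new_ltk().
 */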
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

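/* Reassemble a packet of the given type from a driver that delivers
 * data in arbitrary chunks. The partially built skb is parked in
 * hdev->reassembly[index]; scb->expect tracks how many bytes are
 * still missing, first for the header and then for the payload.
 * Returns the number of input bytes left unconsumed, or a negative
 * error.
 */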
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

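/* Stream reassembly for drivers that deliver a single byte stream:
 * the packet type is read from the first byte of each frame and one
 * shared reassembly slot (index 0) is used for all packet types.
 */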
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

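/* HCI requests batch several commands into one unit: commands are
 * collected on req->cmd_q by hci_req_add() and spliced onto the
 * device command queue in one go by hci_req_run(), with a completion
 * callback attached to the last command of the batch.
 */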
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

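/* Allocate and fill an skb carrying a single HCI command; the caller
 * decides whether it goes directly onto hdev->cmd_q or onto an
 * asynchronous request queue.
 */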
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

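/* Queue an outgoing ACL frame, adding the ACL header to the head skb
 * and to every fragment on its frag_list. The head keeps the
 * caller's flags (typically ACL_START) while continuation fragments
 * are re-flagged ACL_CONT, and all of them are queued atomically so
 * the TX scheduler never sees a partial frame.
 */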
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
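/* hci_low_sent() picks, among connections of the given type that
 * have data queued, the one with the fewest packets in flight, and
 * computes a fair quota by dividing the free controller buffers by
 * the number of eligible connections.
 */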
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

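/* Link TX timeout handler: if controller buffer credits have not
 * come back for too long, forcibly disconnect every connection of
 * the affected type that still has unacknowledged packets.
 */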
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

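/* Channel scheduler: like hci_low_sent(), but at hci_chan granularity
 * and priority aware. Only channels whose head skb carries the highest
 * priority seen so far compete; among those, the channel on the least
 * loaded connection wins. *quote again gets a fair share of the free
 * controller buffers.
 */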
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

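/* Anti-starvation pass, run after a scheduling round has consumed
 * buffers: channels that sent nothing this round (chan->sent == 0) get
 * their head skb promoted to HCI_PRIO_MAX - 1 so higher-priority
 * traffic cannot starve them forever; channels that did send have
 * their counter reset for the next round.
 */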
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

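/* Block-based flow control (AMP controllers) accounts in fixed-size
 * blocks rather than packets: a frame of skb->len bytes occupies
 * ceil((skb->len - HCI_ACL_HDR_SIZE) / hdev->block_len) blocks.
 */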
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

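/* If the controller has had no free ACL buffers for longer than
 * HCI_ACL_TX_TIMEOUT, assume the link is stalled and kill the
 * offending connections. Skipped for raw (userspace-driven) devices.
 */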
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

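/* Packet-based ACL scheduler: while buffer credits remain, drain the
 * best channel (per hci_chan_sent()) up to its quote, stopping early
 * if a lower-priority skb reaches the head of the queue. A final
 * hci_prio_recalculate() pass keeps idle channels from starving.
 */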
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

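/* Block-based ACL scheduler: the same loop as hci_sched_acl_pkt(), but
 * credits are counted in controller buffer blocks (__get_blocks()) and
 * the link type is AMP_LINK on AMP controllers. Note that a frame
 * larger than the remaining block budget aborts the whole round.
 */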
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

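/* Dispatch ACL scheduling according to the flow control mode the
 * controller advertised: packet-based or block-based.
 */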
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

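/* Schedule eSCO: identical to SCO scheduling and shares hdev->sco_cnt */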
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

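/* Schedule LE: mirrors the packet-based ACL scheduler, but draws on
 * the dedicated LE buffer pool (le_cnt/le_pkts) when the controller
 * has one, falling back to the shared ACL pool otherwise.
 */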
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

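/* TX work item: runs every scheduler in fixed order (ACL, SCO, eSCO,
 * LE) and then flushes raw packets, which bypass flow control entirely.
 */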
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
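/* Strip the ACL header, look up the connection by handle and hand the
 * payload to L2CAP; packets for unknown handles are logged and dropped.
 */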
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

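/* A request is complete once the head of the command queue starts a
 * new request (or the queue is empty); requests are delimited by the
 * req.start flag on their first command.
 */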
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

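/* Re-queue a clone of the last sent command (unless it was a reset)
 * so that it gets transmitted again.
 */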
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

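/* Called from the event handlers when a command completes: decides
 * whether the request the command belonged to is finished and, if so,
 * runs its completion callback and drops any commands the request
 * still had queued.
 */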
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

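/* RX work item: drains hdev->rx_q, mirroring every frame to the
 * monitor and (in promiscuous mode) to raw sockets before demuxing
 * events, ACL data and SCO data to their handlers. Frames on raw
 * devices are not processed by the stack at all.
 */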
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

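/* CMD work item: sends the next queued HCI command when the controller
 * has a free command slot (cmd_cnt), keeps a clone in hdev->sent_cmd
 * for completion matching, and (re)arms the command timeout unless a
 * reset is in flight. If cloning fails, the command is re-queued and
 * the work rescheduled.
 */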
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

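/* Kick off a classic inquiry with the General Inquiry Access Code for
 * the given duration (in 1.28 s units, per the HCI Inquiry command);
 * fails with -EINPROGRESS if an inquiry is already running.
 */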
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

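/* Map a BDADDR_LE_* address type to the corresponding ADDR_LE_DEV_*
 * constant; unknown types fall back to the LE random address type.
 */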
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}