/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

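/* hci_req_sync_complete() and hci_req_cancel() above are the two ways a
 * pending request ends: both move req_status away from HCI_REQ_PEND and
 * wake req_wait_q, which __hci_req_sync() below sleeps on.
 */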
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;
		/* req_run will fail if the request did not add any
		 * commands to the queue, something that can happen when
		 * a request with conditionals doesn't trigger any
		 * commands to be sent. This is normal behavior and
		 * should not trigger an error return.
		 */
		return 0;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

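/* Usage sketch (illustrative only, not part of this file): a request
 * builder queues commands with hci_req_add() and a caller runs it
 * synchronously through hci_req_sync(), e.g.
 *
 *	static void example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_RESET, 0, NULL);
 *	}
 *
 *	err = hci_req_sync(hdev, example_req, 0, HCI_INIT_TIMEOUT);
 */
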
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

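/* Inquiry mode 0x02 requests Inquiry Result with Extended Inquiry
 * Response events, 0x01 Inquiry Result with RSSI and 0x00 the standard
 * Inquiry Result format. The manufacturer/revision checks above appear
 * to cover controllers that handle RSSI results without advertising it
 * in their LMP features.
 */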
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

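/* events[] above is the 64-bit event mask in little-endian byte order:
 * byte n carries mask bits n * 8 through n * 8 + 7 as defined for the
 * HCI Set Event Mask command in the Core specification.
 */
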
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

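/* Bring-up is staged: hci_init1_req resets the controller and reads its
 * basic capabilities, hci_init2_req configures event masks and SSP/EIR
 * from what stage one discovered, and hci_init3_req applies link policy
 * and LE host support.
 */
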
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

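/* Only DISCOVERY_FINDING and DISCOVERY_RESOLVING count as active;
 * STARTING and STOPPING are transitional. mgmt is notified on the
 * transitions into FINDING and back to STOPPED.
 */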
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

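/* Update the inquiry cache with a new result. Returns false when the
 * entry could not be allocated or its name is still marked
 * NAME_NOT_KNOWN, true otherwise; *ssp, when supplied, reports whether
 * the remote indicated Simple Pairing support.
 */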
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

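/* Advertising data is a sequence of length/type/value elements: a
 * length byte covering the type and payload, an EIR_* type byte, then
 * the payload, all within a HCI_MAX_AD_LENGTH (31 byte) buffer.
 */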
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

/* ---- HCI ioctl helpers ---- */

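/* Power on a controller: open the driver transport and, unless the
 * device is marked HCI_RAW, run the staged __hci_init() sequence with
 * the HCI_INIT flag set for the duration of the bring-up.
 */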
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

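	/* For the MTU ioctls, dev_opt packs two values: read as an array
	 * of two __u16, element 1 is the MTU and element 0 the packet
	 * count.
	 */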
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001281 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1282 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 break;
1284
1285 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001286 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1287 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288 break;
1289
1290 default:
1291 err = -EINVAL;
1292 break;
1293 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001294
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 hci_dev_put(hdev);
1296 return err;
1297}
1298
1299int hci_get_dev_list(void __user *arg)
1300{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001301 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 struct hci_dev_list_req *dl;
1303 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 int n = 0, size, err;
1305 __u16 dev_num;
1306
1307 if (get_user(dev_num, (__u16 __user *) arg))
1308 return -EFAULT;
1309
1310 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1311 return -EINVAL;
1312
1313 size = sizeof(*dl) + dev_num * sizeof(*dr);
1314
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001315 dl = kzalloc(size, GFP_KERNEL);
1316 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317 return -ENOMEM;
1318
1319 dr = dl->dev_req;
1320
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001321 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001322 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001323 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001324 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001325
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001326 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1327 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001328
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 (dr + n)->dev_id = hdev->id;
1330 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001331
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 if (++n >= dev_num)
1333 break;
1334 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001335 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336
1337 dl->dev_num = n;
1338 size = sizeof(*dl) + n * sizeof(*dr);
1339
1340 err = copy_to_user(arg, dl, size);
1341 kfree(dl);
1342
1343 return err ? -EFAULT : 0;
1344}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
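
/* Illustrative user-space sketch (not part of this file): the two
 * helpers above are reached through the HCIGETDEVLIST and HCIGETDEVINFO
 * ioctls on a raw HCI socket, roughly like this (error handling elided):
 *
 *	struct hci_dev_info di = { .dev_id = 0 };
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(sk, HCIGETDEVINFO, (void *) &di) == 0)
 *		printf("%s\n", di.name);
 */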

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
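
/* Note (an inference from the handler above, not documented elsewhere in
 * this file): the rfkill hook is one-way. A soft block closes the device
 * via hci_dev_do_close(), but clearing the block does not reopen it; the
 * device has to be brought back up explicitly, e.g. from user space.
 */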

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
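
/* Summary of hci_persistent_key(), derived directly from the checks
 * above (added for clarity, not a spec quotation):
 *
 *	key situation				stored persistently?
 *	legacy key (type < 0x03)		yes
 *	debug combination key			no
 *	changed combination, no previous key	no
 *	no connection (security mode 3)		yes
 *	both sides asked for bonding		yes
 *	either side used dedicated bonding	yes
 *	anything else				no
 */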

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
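
/* Illustrative call site (a sketch, not code from this file): the
 * HCI_EV_LINK_KEY_NOTIFY event handler is the natural caller, passing
 * new_key = 1 so that mgmt gets notified and conn->flush_key is set:
 *
 *	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
 *			 ev->key_type, pin_len);
 */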

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
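
/* Note (an inference from this file): the timer is set up in
 * hci_alloc_dev() below and is re-armed each time a command is handed to
 * the driver, so it only fires when no Command Complete/Status event
 * arrives in time. Forcing cmd_cnt back to 1 lets the next queued
 * command go out despite the stalled one.
 */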

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
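
/* Behavioural note derived from the two helpers above: adding BDADDR_ANY
 * is rejected with -EBADF, while deleting BDADDR_ANY is overloaded to
 * flush the whole list, so all blocked addresses can be dropped with a
 * single call:
 *
 *	hci_blacklist_del(hdev, BDADDR_ANY, 0);
 */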

static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
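
/* The full scan sequence, as a sketch (the parameter values are only
 * plausible examples; interval and window are in units of 0.625 ms,
 * timeout is in ms):
 *
 *	hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10000);
 *
 * queues le_scan_work(), which lands here. This function then sends
 * LE Set Scan Parameters followed by LE Set Scan Enable synchronously
 * and finally arms le_scan_disable to stop scanning after the timeout.
 */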

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
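
/* Typical driver usage, as a sketch (my_open/my_close/my_send are
 * placeholder driver callbacks, not symbols from this file):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */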

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
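
/* Driver-side usage, as a sketch (assumes the skb already holds exactly
 * one complete HCI packet):
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 *
 * Drivers that only see an unstructured byte stream can instead feed it
 * to hci_recv_stream_fragment() below and let the core find the packet
 * boundaries.
 */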

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
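
/* Return-value convention, restated from the code above: a negative
 * value is an error; otherwise the number of input bytes NOT yet
 * consumed is returned. Callers therefore loop, advancing their data
 * pointer by (count - rem) each round, exactly as hci_recv_fragment()
 * and hci_recv_stream_fragment() below do.
 */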

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -EINVAL;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);

	return 0;
}
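
/* Putting the request API together, a sketch of a multi-command request
 * (my_complete stands in for a real hci_req_complete_t callback and the
 * two opcodes are just examples):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *	hci_req_add(&req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), &cod);
 *	err = hci_req_run(&req, my_complete);
 *
 * Only the last queued command carries the completion callback, so
 * my_complete runs once after the whole request has finished.
 */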

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
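
/* Worked example of the quota math above (hypothetical numbers): with
 * hdev->acl_cnt = 8 free controller buffers and num = 3 ACL connections
 * holding queued data, the least-recently-served connection is picked
 * and granted quote = 8 / 3 = 2 packets for this scheduling round; a
 * quota of 0 is rounded up to 1 so a backlogged link can never starve
 * completely.
 */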

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2831
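/* Priority promotion, run after a scheduling pass that sent data: any
 * channel that sent nothing this round (chan->sent == 0) but still has
 * queued data gets its head skb promoted to HCI_PRIO_MAX - 1, so a
 * steady stream of high-priority traffic cannot starve it forever.
 * Channels that did send are reset (chan->sent = 0) for the next round.
 */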
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}

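/* Block-based flow control accounts buffer usage in fixed-size blocks
 * rather than whole packets.  E.g. with block_len = 64, a 343-byte skb
 * (339 bytes of payload after the 4-byte ACL header) occupies
 * DIV_ROUND_UP(339, 64) = 6 blocks.  (block_len = 64 is only an
 * illustrative value; the real block size is reported by the
 * controller.)
 */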
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

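/* Shared ACL TX watchdog: if the controller has no free buffers and the
 * last ACL transmission is older than HCI_ACL_TX_TIMEOUT, assume the
 * link has stalled and kill the offending connections.
 */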
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002888static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 if (!test_bit(HCI_RAW, &hdev->flags)) {
2891 /* ACL tx timeout must be longer than maximum
2892 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002893 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002894 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002895 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002897}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898
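/* Packet-based ACL scheduler: drain channels in the order returned by
 * hci_chan_sent(), sending at most 'quote' packets per channel and
 * stopping early if a lower-priority skb reaches the head of the queue.
 * A priority recalculation pass runs afterwards if anything was sent.
 */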
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

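/* Block-based ACL scheduler: same structure as the packet-based variant,
 * but credits are counted in buffer blocks via __get_blocks(), and a
 * single packet may consume several blocks of the quote.  On an AMP
 * controller the data links are AMP_LINK rather than ACL_LINK.
 */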
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

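/* Dispatch ACL scheduling according to the flow control mode the
 * controller operates in: packet-based (classic BR/EDR) or block-based
 * (as used e.g. by AMP controllers).
 */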
static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL link over BR/EDR controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* No AMP link over AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

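/* Synchronous links use a simpler scheduler than ACL: hci_low_sent()
 * picks the connection of the given type with the fewest packets in
 * flight, and the returned quote caps how many frames it may send per
 * pass.  There is no per-channel priority handling for SCO/eSCO.
 */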
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

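/* LE scheduler: mirrors the packet-based ACL path, with its own TX
 * watchdog.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) borrow from the ACL credit count instead, and the
 * remaining credits are written back to whichever pool was used.
 */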
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

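/* Deferred TX processing: run each link type scheduler in turn, then
 * flush the raw packet queue.  Each scheduler is a no-op if its link
 * type has no connections or no controller credits.
 */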
static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        hci_sched_le(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

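/* A request is a sequence of commands queued together; the first command
 * of each request is flagged with req.start.  The current request is
 * complete when the command queue is empty or the next queued command
 * starts a new request.
 */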
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

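/* Re-queue a clone of the last sent command (unless it was a reset) at
 * the head of the command queue and kick the command work, so a command
 * the controller lost (e.g. across a spontaneous reset) gets retried.
 */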
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

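/* Called on command complete events: decide whether the request that the
 * completed command belongs to is finished, and if so run its completion
 * callback and drop any commands of that request still left in cmd_q
 * (e.g. when a command in the middle of a request failed).
 */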
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;
                if (req_complete)
                        goto call_complete;
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

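/* Called on command status events: a failure terminates the whole
 * request immediately, while for success the request is only finalized
 * here when nothing is left for the command complete event to do.
 */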
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        if (status) {
                hci_req_cmd_complete(hdev, opcode, status);
                return;
        }

        /* No need to handle success status if there are more commands */
        if (!hci_req_is_complete(hdev))
                return;

        if (hdev->sent_cmd)
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

        /* If the request doesn't have a complete callback or there
         * are other commands/requests in the hdev queue we consider
         * this request as completed.
         */
        if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
                hci_req_cmd_complete(hdev, opcode, status);
}

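/* RX work: drains hdev->rx_q.  Every frame is mirrored to the monitor
 * socket and, in promiscuous mode, to the raw sockets, before being
 * demultiplexed by packet type to the event, ACL and SCO handlers.
 */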
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

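/* Command work: sends the next queued command if the controller has a
 * free command credit (cmd_cnt).  A clone of the command is kept in
 * hdev->sent_cmd for matching against the eventual completion, and the
 * command timer is armed unless a reset is in progress.
 */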
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

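/* Start an inquiry using the General Inquiry Access Code (GIAC,
 * 0x9e8b33, stored little-endian in lap[]).  The inquiry cache is
 * flushed first so stale results from a previous scan do not linger.
 */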
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return -EINPROGRESS;

        inquiry_cache_flush(hdev);

        memset(&cp, 0, sizeof(cp));
        memcpy(&cp.lap, lap, sizeof(cp.lap));
        cp.length = length;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_INQUIRY, &hdev->flags))
                return -EALREADY;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fallback to LE Random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}