blob: 8bffd3eb344d3271d4b2527de07824ca6b61a61d [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Fabio Estevam8b281b92012-01-10 18:33:50 -020058bool enable_hs;
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020059
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
Linus Torvalds1da177e2005-04-16 15:20:36 -070072/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080073static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
77int hci_register_notifier(struct notifier_block *nb)
78{
Alan Sterne041c682006-03-27 01:16:30 -080079 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070080}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
Alan Sterne041c682006-03-27 01:16:30 -080084 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085}
86
Marcel Holtmann65164552005-10-28 19:20:48 +020087static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070088{
Alan Sterne041c682006-03-27 01:16:30 -080089 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090}
91
92/* ---- HCI requests ---- */
93
Johan Hedberg23bb5762010-12-21 23:01:27 +020094void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
Johan Hedberg23bb5762010-12-21 23:01:27 +020096 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
Johan Hedberga5040ef2011-01-10 13:28:59 +020098 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200102 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900123static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100124 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125{
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700146 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700156 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157
Johan Hedberga5040ef2011-01-10 13:28:59 +0200158 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100166 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167{
168 int ret;
169
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
181static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182{
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300186 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188}
189
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200190static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200192 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800193 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200194 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 /* Mandatory initialization */
199
200 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300204 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200209 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200211
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
224 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200230 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700234 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240}
241
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200242static void amp_init(struct hci_dev *hdev)
243{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251}
252
253static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254{
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285}
286
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300287static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288{
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293}
294
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200321 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323}
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
Marcel Holtmanna418b892008-11-30 12:17:28 +0100329 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200358
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
363 if (discov->state == DISCOVERY_INQUIRY ||
364 discov->state == DISCOVERY_RESOLVING)
365 return true;
366
367 return false;
368}
369
Johan Hedbergff9ef572012-01-04 14:23:45 +0200370void hci_discovery_set_state(struct hci_dev *hdev, int state)
371{
372 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
373
374 if (hdev->discovery.state == state)
375 return;
376
377 switch (state) {
378 case DISCOVERY_STOPPED:
379 mgmt_discovering(hdev, 0);
380 break;
381 case DISCOVERY_STARTING:
382 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200383 case DISCOVERY_INQUIRY:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200384 mgmt_discovering(hdev, 1);
385 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200386 case DISCOVERY_RESOLVING:
387 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200388 case DISCOVERY_STOPPING:
389 break;
390 }
391
392 hdev->discovery.state = state;
393}
394
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395static void inquiry_cache_flush(struct hci_dev *hdev)
396{
Johan Hedberg30883512012-01-04 14:16:21 +0200397 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200398 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399
Johan Hedberg561aafb2012-01-04 13:31:59 +0200400 list_for_each_entry_safe(p, n, &cache->all, all) {
401 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200402 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200404
405 INIT_LIST_HEAD(&cache->unknown);
406 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200407 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408}
409
410struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
411{
Johan Hedberg30883512012-01-04 14:16:21 +0200412 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413 struct inquiry_entry *e;
414
415 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
416
Johan Hedberg561aafb2012-01-04 13:31:59 +0200417 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200419 return e;
420 }
421
422 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423}
424
Johan Hedberg561aafb2012-01-04 13:31:59 +0200425struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
426 bdaddr_t *bdaddr)
427{
Johan Hedberg30883512012-01-04 14:16:21 +0200428 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200429 struct inquiry_entry *e;
430
431 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
432
433 list_for_each_entry(e, &cache->unknown, list) {
434 if (!bacmp(&e->data.bdaddr, bdaddr))
435 return e;
436 }
437
438 return NULL;
439}
440
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200441struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
442 bdaddr_t *bdaddr,
443 int state)
444{
445 struct discovery_state *cache = &hdev->discovery;
446 struct inquiry_entry *e;
447
448 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
449
450 list_for_each_entry(e, &cache->resolve, list) {
451 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
452 return e;
453 if (!bacmp(&e->data.bdaddr, bdaddr))
454 return e;
455 }
456
457 return NULL;
458}
459
Johan Hedberga3d4e202012-01-09 00:53:02 +0200460void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
461 struct inquiry_entry *ie)
462{
463 struct discovery_state *cache = &hdev->discovery;
464 struct list_head *pos = &cache->resolve;
465 struct inquiry_entry *p;
466
467 list_del(&ie->list);
468
469 list_for_each_entry(p, &cache->resolve, list) {
470 if (p->name_state != NAME_PENDING &&
471 abs(p->data.rssi) >= abs(ie->data.rssi))
472 break;
473 pos = &p->list;
474 }
475
476 list_add(&ie->list, pos);
477}
478
Johan Hedberg31754052012-01-04 13:39:52 +0200479bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Johan Hedberg561aafb2012-01-04 13:31:59 +0200480 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481{
Johan Hedberg30883512012-01-04 14:16:21 +0200482 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200483 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484
485 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
486
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200487 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200488 if (ie) {
489 if (ie->name_state == NAME_NEEDED &&
490 data->rssi != ie->data.rssi) {
491 ie->data.rssi = data->rssi;
492 hci_inquiry_cache_update_resolve(hdev, ie);
493 }
494
Johan Hedberg561aafb2012-01-04 13:31:59 +0200495 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200496 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200497
Johan Hedberg561aafb2012-01-04 13:31:59 +0200498 /* Entry not in the cache. Add new one. */
499 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
500 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200501 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200502
503 list_add(&ie->all, &cache->all);
504
505 if (name_known) {
506 ie->name_state = NAME_KNOWN;
507 } else {
508 ie->name_state = NAME_NOT_KNOWN;
509 list_add(&ie->list, &cache->unknown);
510 }
511
512update:
513 if (name_known && ie->name_state != NAME_KNOWN &&
514 ie->name_state != NAME_PENDING) {
515 ie->name_state = NAME_KNOWN;
516 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517 }
518
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200519 memcpy(&ie->data, data, sizeof(*data));
520 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200522
523 if (ie->name_state == NAME_NOT_KNOWN)
524 return false;
525
526 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527}
528
529static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
530{
Johan Hedberg30883512012-01-04 14:16:21 +0200531 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 struct inquiry_info *info = (struct inquiry_info *) buf;
533 struct inquiry_entry *e;
534 int copied = 0;
535
Johan Hedberg561aafb2012-01-04 13:31:59 +0200536 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200538
539 if (copied >= num)
540 break;
541
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 bacpy(&info->bdaddr, &data->bdaddr);
543 info->pscan_rep_mode = data->pscan_rep_mode;
544 info->pscan_period_mode = data->pscan_period_mode;
545 info->pscan_mode = data->pscan_mode;
546 memcpy(info->dev_class, data->dev_class, 3);
547 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200548
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200550 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 }
552
553 BT_DBG("cache %p, copied %d", cache, copied);
554 return copied;
555}
556
557static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
558{
559 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
560 struct hci_cp_inquiry cp;
561
562 BT_DBG("%s", hdev->name);
563
564 if (test_bit(HCI_INQUIRY, &hdev->flags))
565 return;
566
567 /* Start Inquiry */
568 memcpy(&cp.lap, &ir->lap, 3);
569 cp.length = ir->length;
570 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200571 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572}
573
574int hci_inquiry(void __user *arg)
575{
576 __u8 __user *ptr = arg;
577 struct hci_inquiry_req ir;
578 struct hci_dev *hdev;
579 int err = 0, do_inquiry = 0, max_rsp;
580 long timeo;
581 __u8 *buf;
582
583 if (copy_from_user(&ir, ptr, sizeof(ir)))
584 return -EFAULT;
585
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200586 hdev = hci_dev_get(ir.dev_id);
587 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700588 return -ENODEV;
589
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300590 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900591 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200592 inquiry_cache_empty(hdev) ||
593 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 inquiry_cache_flush(hdev);
595 do_inquiry = 1;
596 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300597 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598
Marcel Holtmann04837f62006-07-03 10:02:33 +0200599 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200600
601 if (do_inquiry) {
602 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
603 if (err < 0)
604 goto done;
605 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606
607 /* for unlimited number of responses we will use buffer with 255 entries */
608 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
609
610 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
611 * copy it to the user space.
612 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100613 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200614 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 err = -ENOMEM;
616 goto done;
617 }
618
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300619 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300621 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622
623 BT_DBG("num_rsp %d", ir.num_rsp);
624
625 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
626 ptr += sizeof(ir);
627 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
628 ir.num_rsp))
629 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900630 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631 err = -EFAULT;
632
633 kfree(buf);
634
635done:
636 hci_dev_put(hdev);
637 return err;
638}
639
640/* ---- HCI ioctl helpers ---- */
641
642int hci_dev_open(__u16 dev)
643{
644 struct hci_dev *hdev;
645 int ret = 0;
646
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200647 hdev = hci_dev_get(dev);
648 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 return -ENODEV;
650
651 BT_DBG("%s %p", hdev->name, hdev);
652
653 hci_req_lock(hdev);
654
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200655 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
656 ret = -ERFKILL;
657 goto done;
658 }
659
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660 if (test_bit(HCI_UP, &hdev->flags)) {
661 ret = -EALREADY;
662 goto done;
663 }
664
665 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
666 set_bit(HCI_RAW, &hdev->flags);
667
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200668 /* Treat all non BR/EDR controllers as raw devices if
669 enable_hs is not set */
670 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100671 set_bit(HCI_RAW, &hdev->flags);
672
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673 if (hdev->open(hdev)) {
674 ret = -EIO;
675 goto done;
676 }
677
678 if (!test_bit(HCI_RAW, &hdev->flags)) {
679 atomic_set(&hdev->cmd_cnt, 1);
680 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200681 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682
Marcel Holtmann04837f62006-07-03 10:02:33 +0200683 ret = __hci_request(hdev, hci_init_req, 0,
684 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685
Andre Guedeseead27d2011-06-30 19:20:55 -0300686 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300687 ret = __hci_request(hdev, hci_le_init_req, 0,
688 msecs_to_jiffies(HCI_INIT_TIMEOUT));
689
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690 clear_bit(HCI_INIT, &hdev->flags);
691 }
692
693 if (!ret) {
694 hci_dev_hold(hdev);
695 set_bit(HCI_UP, &hdev->flags);
696 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200697 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300698 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200699 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300700 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200701 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900702 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200704 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200705 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400706 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707
708 skb_queue_purge(&hdev->cmd_q);
709 skb_queue_purge(&hdev->rx_q);
710
711 if (hdev->flush)
712 hdev->flush(hdev);
713
714 if (hdev->sent_cmd) {
715 kfree_skb(hdev->sent_cmd);
716 hdev->sent_cmd = NULL;
717 }
718
719 hdev->close(hdev);
720 hdev->flags = 0;
721 }
722
723done:
724 hci_req_unlock(hdev);
725 hci_dev_put(hdev);
726 return ret;
727}
728
729static int hci_dev_do_close(struct hci_dev *hdev)
730{
731 BT_DBG("%s %p", hdev->name, hdev);
732
733 hci_req_cancel(hdev, ENODEV);
734 hci_req_lock(hdev);
735
736 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300737 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700738 hci_req_unlock(hdev);
739 return 0;
740 }
741
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200742 /* Flush RX and TX works */
743 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400744 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700745
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200746 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200747 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200748 hdev->discov_timeout = 0;
749 }
750
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200751 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +0200752 cancel_delayed_work(&hdev->power_off);
Johan Hedberg32435532011-11-07 22:16:04 +0200753
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200754 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200755 cancel_delayed_work(&hdev->service_cache);
756
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300757 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700758 inquiry_cache_flush(hdev);
759 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300760 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761
762 hci_notify(hdev, HCI_DEV_DOWN);
763
764 if (hdev->flush)
765 hdev->flush(hdev);
766
767 /* Reset device */
768 skb_queue_purge(&hdev->cmd_q);
769 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200770 if (!test_bit(HCI_RAW, &hdev->flags) &&
771 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700772 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200773 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200774 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775 clear_bit(HCI_INIT, &hdev->flags);
776 }
777
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200778 /* flush cmd work */
779 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780
781 /* Drop queues */
782 skb_queue_purge(&hdev->rx_q);
783 skb_queue_purge(&hdev->cmd_q);
784 skb_queue_purge(&hdev->raw_q);
785
786 /* Drop last sent command */
787 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300788 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789 kfree_skb(hdev->sent_cmd);
790 hdev->sent_cmd = NULL;
791 }
792
793 /* After this point our queues are empty
794 * and no tasks are scheduled. */
795 hdev->close(hdev);
796
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300797 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200798 mgmt_powered(hdev, 0);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300799 hci_dev_unlock(hdev);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200800
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801 /* Clear flags */
802 hdev->flags = 0;
803
804 hci_req_unlock(hdev);
805
806 hci_dev_put(hdev);
807 return 0;
808}
809
810int hci_dev_close(__u16 dev)
811{
812 struct hci_dev *hdev;
813 int err;
814
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200815 hdev = hci_dev_get(dev);
816 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700817 return -ENODEV;
818 err = hci_dev_do_close(hdev);
819 hci_dev_put(hdev);
820 return err;
821}
822
/* Reset the HCI device with the given index.
 *
 * Drops all pending RX/command traffic, flushes the inquiry cache and
 * the connection hash, invokes the driver flush hook and, unless the
 * device is in raw mode, issues an HCI Reset command to the controller.
 * Returns 0 on success, -ENODEV if no such device exists, or the
 * request error.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other HCI requests on this device */
	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Discard cached inquiry results and tear down all connections;
	 * both require the device lock */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control state: one command credit, no outstanding
	 * ACL/SCO/LE packets */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* In raw mode userspace drives the controller directly, so do
	 * not send HCI Reset on its behalf */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
861
862int hci_dev_reset_stat(__u16 dev)
863{
864 struct hci_dev *hdev;
865 int ret = 0;
866
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200867 hdev = hci_dev_get(dev);
868 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869 return -ENODEV;
870
871 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
872
873 hci_dev_put(hdev);
874
875 return ret;
876}
877
/* Handle device-configuration ioctls (HCISET*).
 *
 * @cmd: ioctl command number
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Copies the request from userspace, resolves the device and applies
 * the requested setting, either by issuing an HCI request to the
 * controller or by updating host-side state directly.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		/* Enable/disable authentication on the controller */
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		/* Set page/inquiry scan enable */
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		/* Set default link policy */
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Host-side only: restrict to the supported mode bits */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high 16 bits and packet count
		 * in the low 16 bits */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU, for SCO */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
952
/* HCIGETDEVLIST ioctl: copy the list of registered HCI devices to
 * userspace.
 *
 * @arg: userspace pointer to a struct hci_dev_list_req; dev_num on
 *       input bounds the number of entries, on output it holds the
 *       number actually filled in.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays at two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* A legacy (ioctl) user is taking over: cancel the
		 * pending auto-power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed through mgmt default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the header plus the entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
999
/* HCIGETDEVINFO ioctl: copy a snapshot of one device's state to
 * userspace.
 *
 * @arg: userspace pointer to a struct hci_dev_info; dev_id selects
 *       the device, the rest of the struct is filled in on success.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A legacy (ioctl) user is taking over: cancel the pending
	 * auto-power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed through mgmt default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1041
1042/* ---- Interface to HCI drivers ---- */
1043
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001044static int hci_rfkill_set_block(void *data, bool blocked)
1045{
1046 struct hci_dev *hdev = data;
1047
1048 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1049
1050 if (!blocked)
1051 return 0;
1052
1053 hci_dev_do_close(hdev);
1054
1055 return 0;
1056}
1057
/* rfkill operations for HCI controllers; only blocking is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1061
/* Alloc HCI device */
/* Allocate and minimally initialize a new hci_dev for a driver.
 * Returns the zero-initialized device with sysfs state and the
 * driver_init queue set up, or NULL on allocation failure. The
 * caller owns the reference and frees it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1077
/* Free HCI device */
/* Release a driver's reference to an hci_dev allocated with
 * hci_alloc_dev(). The structure itself is reference counted via the
 * embedded struct device, so the memory is only freed once the last
 * reference is dropped (in the device release callback).
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1087
/* Work item: power on a newly registered or mgmt-powered controller.
 * Opens the device; if auto-off is armed, schedules the delayed
 * power-off, and on first setup completion notifies mgmt that the
 * index is ready.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* If nobody claims the device, turn it back off after the
	 * auto-off timeout */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on completes setup: announce the
	 * new index to the management interface */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1104
/* Delayed work item: power a controller back off after the auto-off
 * timeout expired without anyone claiming it.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The timeout fired, so auto-off is no longer pending */
	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}
1116
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001117static void hci_discov_off(struct work_struct *work)
1118{
1119 struct hci_dev *hdev;
1120 u8 scan = SCAN_PAGE;
1121
1122 hdev = container_of(work, struct hci_dev, discov_off.work);
1123
1124 BT_DBG("%s", hdev->name);
1125
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001126 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001127
1128 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1129
1130 hdev->discov_timeout = 0;
1131
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001132 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001133}
1134
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001135int hci_uuids_clear(struct hci_dev *hdev)
1136{
1137 struct list_head *p, *n;
1138
1139 list_for_each_safe(p, n, &hdev->uuids) {
1140 struct bt_uuid *uuid;
1141
1142 uuid = list_entry(p, struct bt_uuid, list);
1143
1144 list_del(p);
1145 kfree(uuid);
1146 }
1147
1148 return 0;
1149}
1150
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001151int hci_link_keys_clear(struct hci_dev *hdev)
1152{
1153 struct list_head *p, *n;
1154
1155 list_for_each_safe(p, n, &hdev->link_keys) {
1156 struct link_key *key;
1157
1158 key = list_entry(p, struct link_key, list);
1159
1160 list_del(p);
1161 kfree(key);
1162 }
1163
1164 return 0;
1165}
1166
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001167int hci_smp_ltks_clear(struct hci_dev *hdev)
1168{
1169 struct smp_ltk *k, *tmp;
1170
1171 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1172 list_del(&k->list);
1173 kfree(k);
1174 }
1175
1176 return 0;
1177}
1178
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001179struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1180{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001181 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001182
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001183 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001184 if (bacmp(bdaddr, &k->bdaddr) == 0)
1185 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001186
1187 return NULL;
1188}
1189
/* Decide whether a newly created link key should be stored
 * persistently (returns 1) or discarded after the connection
 * (returns 0), based on the key type and the bonding requirements
 * both sides declared during pairing.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
							u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1225
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001226struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001227{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001228 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001229
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001230 list_for_each_entry(k, &hdev->long_term_keys, list) {
1231 if (k->ediv != ediv ||
1232 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001233 continue;
1234
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001236 }
1237
1238 return NULL;
1239}
1240EXPORT_SYMBOL(hci_find_ltk);
1241
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001242struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1243 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001244{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001245 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001246
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001247 list_for_each_entry(k, &hdev->long_term_keys, list)
1248 if (addr_type == k->bdaddr_type &&
1249 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001250 return k;
1251
1252 return NULL;
1253}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001254EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001255
/* Store (or update) a link key for a remote device.
 *
 * @conn:    the connection the key was created on, or NULL for
 *           security mode 3 keys
 * @new_key: non-zero if this key was just created by the controller
 *           (as opposed to being loaded), which triggers a mgmt event
 *           and a persistence decision
 * @type:    HCI link key type; @pin_len: PIN length used, if any
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the original key's type so
	 * the persistence decision still reflects how it was created */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are only reported to mgmt, not kept */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1310
/* Store (or update) an SMP key for a remote device.
 *
 * @type must have HCI_SMP_STK or HCI_SMP_LTK set, otherwise the call
 * is a no-op. When @new_key is set and the key is a long term key,
 * the management interface is notified. Returns 0 on success or
 * -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Only short term and long term keys are handled here */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only LTKs (not STKs) are announced via mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1347
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001348int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1349{
1350 struct link_key *key;
1351
1352 key = hci_find_link_key(hdev, bdaddr);
1353 if (!key)
1354 return -ENOENT;
1355
1356 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1357
1358 list_del(&key->list);
1359 kfree(key);
1360
1361 return 0;
1362}
1363
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001364int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1365{
1366 struct smp_ltk *k, *tmp;
1367
1368 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1369 if (bacmp(bdaddr, &k->bdaddr))
1370 continue;
1371
1372 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1373
1374 list_del(&k->list);
1375 kfree(k);
1376 }
1377
1378 return 0;
1379}
1380
Ville Tervo6bd32322011-02-16 16:32:41 +02001381/* HCI command timer function */
1382static void hci_cmd_timer(unsigned long arg)
1383{
1384 struct hci_dev *hdev = (void *) arg;
1385
1386 BT_ERR("%s command tx timeout", hdev->name);
1387 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001388 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001389}
1390
Szymon Janc2763eda2011-03-22 13:12:22 +01001391struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1392 bdaddr_t *bdaddr)
1393{
1394 struct oob_data *data;
1395
1396 list_for_each_entry(data, &hdev->remote_oob_data, list)
1397 if (bacmp(bdaddr, &data->bdaddr) == 0)
1398 return data;
1399
1400 return NULL;
1401}
1402
1403int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1404{
1405 struct oob_data *data;
1406
1407 data = hci_find_remote_oob_data(hdev, bdaddr);
1408 if (!data)
1409 return -ENOENT;
1410
1411 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1412
1413 list_del(&data->list);
1414 kfree(data);
1415
1416 return 0;
1417}
1418
1419int hci_remote_oob_data_clear(struct hci_dev *hdev)
1420{
1421 struct oob_data *data, *n;
1422
1423 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1424 list_del(&data->list);
1425 kfree(data);
1426 }
1427
1428 return 0;
1429}
1430
1431int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1432 u8 *randomizer)
1433{
1434 struct oob_data *data;
1435
1436 data = hci_find_remote_oob_data(hdev, bdaddr);
1437
1438 if (!data) {
1439 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1440 if (!data)
1441 return -ENOMEM;
1442
1443 bacpy(&data->bdaddr, bdaddr);
1444 list_add(&data->list, &hdev->remote_oob_data);
1445 }
1446
1447 memcpy(data->hash, hash, sizeof(data->hash));
1448 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1449
1450 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1451
1452 return 0;
1453}
1454
Antti Julkub2a66aa2011-06-15 12:01:14 +03001455struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1456 bdaddr_t *bdaddr)
1457{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001458 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001459
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001460 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001461 if (bacmp(bdaddr, &b->bdaddr) == 0)
1462 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001463
1464 return NULL;
1465}
1466
1467int hci_blacklist_clear(struct hci_dev *hdev)
1468{
1469 struct list_head *p, *n;
1470
1471 list_for_each_safe(p, n, &hdev->blacklist) {
1472 struct bdaddr_list *b;
1473
1474 b = list_entry(p, struct bdaddr_list, list);
1475
1476 list_del(p);
1477 kfree(b);
1478 }
1479
1480 return 0;
1481}
1482
1483int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1484{
1485 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001486
1487 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1488 return -EBADF;
1489
Antti Julku5e762442011-08-25 16:48:02 +03001490 if (hci_blacklist_lookup(hdev, bdaddr))
1491 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001492
1493 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001494 if (!entry)
1495 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001496
1497 bacpy(&entry->bdaddr, bdaddr);
1498
1499 list_add(&entry->list, &hdev->blacklist);
1500
Johan Hedberg744cf192011-11-08 20:40:14 +02001501 return mgmt_device_blocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001502}
1503
1504int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1505{
1506 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001507
Szymon Janc1ec918c2011-11-16 09:32:21 +01001508 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001509 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001510
1511 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001512 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001513 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001514
1515 list_del(&entry->list);
1516 kfree(entry);
1517
Johan Hedberg744cf192011-11-08 20:40:14 +02001518 return mgmt_device_unblocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001519}
1520
/* Delayed work item: expire the cached LE advertising entries so
 * stale addresses are not used for connection attempts.
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1532
Andre Guedes76c86862011-05-26 16:23:50 -03001533int hci_adv_entries_clear(struct hci_dev *hdev)
1534{
1535 struct adv_entry *entry, *tmp;
1536
1537 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1538 list_del(&entry->list);
1539 kfree(entry);
1540 }
1541
1542 BT_DBG("%s adv cache cleared", hdev->name);
1543
1544 return 0;
1545}
1546
1547struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1548{
1549 struct adv_entry *entry;
1550
1551 list_for_each_entry(entry, &hdev->adv_entries, list)
1552 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1553 return entry;
1554
1555 return NULL;
1556}
1557
1558static inline int is_connectable_adv(u8 evt_type)
1559{
1560 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1561 return 1;
1562
1563 return 0;
1564}
1565
1566int hci_add_adv_entry(struct hci_dev *hdev,
1567 struct hci_ev_le_advertising_info *ev)
1568{
1569 struct adv_entry *entry;
1570
1571 if (!is_connectable_adv(ev->evt_type))
1572 return -EINVAL;
1573
1574 /* Only new entries should be added to adv_entries. So, if
1575 * bdaddr was found, don't add it. */
1576 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1577 return 0;
1578
Andre Guedes4777bfd2012-01-30 23:31:28 -03001579 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Andre Guedes76c86862011-05-26 16:23:50 -03001580 if (!entry)
1581 return -ENOMEM;
1582
1583 bacpy(&entry->bdaddr, &ev->bdaddr);
1584 entry->bdaddr_type = ev->bdaddr_type;
1585
1586 list_add(&entry->list, &hdev->adv_entries);
1587
1588 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1589 batostr(&entry->bdaddr), entry->bdaddr_type);
1590
1591 return 0;
1592}
1593
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594/* Register HCI device */
1595int hci_register_dev(struct hci_dev *hdev)
1596{
1597 struct list_head *head = &hci_dev_list, *p;
Mat Martineau08add512011-11-02 16:18:36 -07001598 int i, id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599
David Herrmanne9b9cfa2012-01-07 15:47:22 +01001600 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601
David Herrmann010666a2012-01-07 15:47:07 +01001602 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 return -EINVAL;
1604
Mat Martineau08add512011-11-02 16:18:36 -07001605 /* Do not allow HCI_AMP devices to register at index 0,
1606 * so the index can be used as the AMP controller ID.
1607 */
1608 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1609
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001610 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611
1612 /* Find first available device id */
1613 list_for_each(p, &hci_dev_list) {
1614 if (list_entry(p, struct hci_dev, list)->id != id)
1615 break;
1616 head = p; id++;
1617 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001618
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 sprintf(hdev->name, "hci%d", id);
1620 hdev->id = id;
Andrei Emeltchenkoc6feeb22011-11-16 17:30:20 +02001621 list_add_tail(&hdev->list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001623 mutex_init(&hdev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624
1625 hdev->flags = 0;
Andre Guedesd23264a2011-11-25 20:53:38 -03001626 hdev->dev_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02001628 hdev->esco_type = (ESCO_HV1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 hdev->link_mode = (HCI_LM_ACCEPT);
Johan Hedberg17fa4b92011-01-25 13:28:33 +02001630 hdev->io_capability = 0x03; /* No Input No Output */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631
Marcel Holtmann04837f62006-07-03 10:02:33 +02001632 hdev->idle_timeout = 0;
1633 hdev->sniff_max_interval = 800;
1634 hdev->sniff_min_interval = 80;
1635
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001636 INIT_WORK(&hdev->rx_work, hci_rx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001637 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001638 INIT_WORK(&hdev->tx_work, hci_tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001639
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640
1641 skb_queue_head_init(&hdev->rx_q);
1642 skb_queue_head_init(&hdev->cmd_q);
1643 skb_queue_head_init(&hdev->raw_q);
1644
Ville Tervo6bd32322011-02-16 16:32:41 +02001645 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1646
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301647 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001648 hdev->reassembly[i] = NULL;
1649
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 init_waitqueue_head(&hdev->req_wait_q);
Thomas Gleixnera6a67ef2009-07-26 08:18:19 +00001651 mutex_init(&hdev->req_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652
Johan Hedberg30883512012-01-04 14:16:21 +02001653 discovery_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
1655 hci_conn_hash_init(hdev);
1656
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001657 INIT_LIST_HEAD(&hdev->mgmt_pending);
1658
David Millerea4bd8b2010-07-30 21:54:49 -07001659 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedbergf0358562010-05-18 13:20:32 +02001660
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001661 INIT_LIST_HEAD(&hdev->uuids);
1662
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001663 INIT_LIST_HEAD(&hdev->link_keys);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001664 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001665
Szymon Janc2763eda2011-03-22 13:12:22 +01001666 INIT_LIST_HEAD(&hdev->remote_oob_data);
1667
Andre Guedes76c86862011-05-26 16:23:50 -03001668 INIT_LIST_HEAD(&hdev->adv_entries);
1669
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001670 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001671 INIT_WORK(&hdev->power_on, hci_power_on);
Johan Hedberg32435532011-11-07 22:16:04 +02001672 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001673
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001674 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1675
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1677
1678 atomic_set(&hdev->promisc, 0);
1679
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001680 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001682 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1683 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001684 if (!hdev->workqueue) {
1685 error = -ENOMEM;
1686 goto err;
1687 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001688
David Herrmann33ca9542011-10-08 14:58:49 +02001689 error = hci_add_sysfs(hdev);
1690 if (error < 0)
1691 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001693 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1694 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1695 if (hdev->rfkill) {
1696 if (rfkill_register(hdev->rfkill) < 0) {
1697 rfkill_destroy(hdev->rfkill);
1698 hdev->rfkill = NULL;
1699 }
1700 }
1701
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001702 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1703 set_bit(HCI_SETUP, &hdev->dev_flags);
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001704 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001705
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01001707 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
1709 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001710
David Herrmann33ca9542011-10-08 14:58:49 +02001711err_wqueue:
1712 destroy_workqueue(hdev->workqueue);
1713err:
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001714 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001715 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001716 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001717
David Herrmann33ca9542011-10-08 14:58:49 +02001718 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719}
1720EXPORT_SYMBOL(hci_register_dev);
1721
1722/* Unregister HCI device */
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): unlinks the device from the global list,
 * shuts the controller down, tears down sysfs/rfkill/workqueue state and
 * finally drops the reference taken at registration time.  The teardown
 * order below is deliberate; do not reorder without care.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the global device list first so no new users
	 * can look the device up while we tear it down. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames still pending. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only tell mgmt about the removal if userspace ever saw the
	 * index (i.e. the device left its INIT/SETUP phase). */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Must complete before the workqueue is destroyed below. */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Flush all per-device data stores under the device lock. */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference held since hci_register_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1774
1775/* Suspend HCI device */
1776int hci_suspend_dev(struct hci_dev *hdev)
1777{
1778 hci_notify(hdev, HCI_DEV_SUSPEND);
1779 return 0;
1780}
1781EXPORT_SYMBOL(hci_suspend_dev);
1782
1783/* Resume HCI device */
1784int hci_resume_dev(struct hci_dev *hdev)
1785{
1786 hci_notify(hdev, HCI_DEV_RESUME);
1787 return 0;
1788}
1789EXPORT_SYMBOL(hci_resume_dev);
1790
/* Receive frame from HCI drivers.
 *
 * Takes ownership of @skb in all cases: it is either queued on the
 * device's rx queue for rx_work processing, or freed on error.
 * Returns 0 on success or -ENXIO if the device is gone or neither
 * up nor initializing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Hand off to the rx worker; actual processing is deferred. */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1813
/* Reassemble a (possibly partial) HCI packet from driver data.
 *
 * Accumulates up to @count bytes of @data into hdev->reassembly[@index].
 * A fresh skb is allocated when a new packet starts; once the header has
 * been collected, the expected payload length is read from it and the
 * remainder is gathered.  A completed frame is pushed into the stack via
 * hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno: -EILSEQ for an invalid type/index, -ENOMEM on allocation
 * failure or when the advertised payload exceeds the skb's tailroom.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the skb for the largest
		 * possible frame of this type and expect its header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Track per-packet reassembly state in the skb's cb. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than is still expected for this packet. */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If the header just completed, learn the payload length
		 * and bail out if it cannot fit in the allocated skb. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1922
/* Feed a buffer of type-tagged HCI fragments into reassembly.
 *
 * Repeatedly calls hci_reassembly() (one reassembly slot per packet
 * type, index = type - 1) until all @count bytes are consumed.
 * Returns the last leftover count (>= 0) or a negative errno from
 * hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past what hci_reassembly() consumed. */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1942
Suraj Sumangala99811512010-07-14 13:02:19 +05301943#define STREAM_REASSEMBLY 0
1944
/* Feed a raw HCI byte stream (H4-style: 1-byte packet-type indicator
 * before each packet) into reassembly.
 *
 * A single STREAM_REASSEMBLY slot is used; when it is empty the next
 * byte is interpreted as the packet type of a new frame, otherwise the
 * in-progress frame's type is reused.  Returns the leftover byte count
 * (>= 0) or a negative errno from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* Consume the type indicator byte itself. */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1977
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978/* ---- Interface to upper protocols ---- */
1979
/* Register an upper-protocol callback structure.
 *
 * Adds @cb to the global hci_cb_list under the list write lock.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1991
/* Unregister an upper-protocol callback structure.
 *
 * Removes @cb from the global hci_cb_list under the list write lock.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2003
/* Hand a fully prepared frame to the transport driver.
 *
 * Takes ownership of @skb (frees it if the device is gone, otherwise
 * passes it to hdev->send).  In promiscuous mode a timestamped copy is
 * first delivered to monitoring sockets.  Returns the driver's result
 * or -ENODEV.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2027
/* Send HCI command.
 *
 * Builds a command skb (header + optional @plen bytes of @param),
 * queues it on hdev->cmd_q and kicks the cmd worker.  During HCI_INIT
 * the opcode is also recorded in hdev->init_last_cmd.  Returns 0 or
 * -ENOMEM.
 *
 * NOTE(review): @plen is stored into the one-byte header length field;
 * presumably callers never pass parameters longer than 255 bytes —
 * confirm at the call sites.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	/* Actual transmission happens from hdev->cmd_work. */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063
2064/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002065void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066{
2067 struct hci_command_hdr *hdr;
2068
2069 if (!hdev->sent_cmd)
2070 return NULL;
2071
2072 hdr = (void *) hdev->sent_cmd->data;
2073
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002074 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 return NULL;
2076
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002077 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078
2079 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2080}
2081
/* Send ACL data */
/* Prepend an ACL header (handle+flags, little-endian length) to @skb.
 * The payload length is captured before the push so it excludes the
 * header itself.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2094
/* Queue an ACL skb (and any frag_list fragments) on @queue.
 *
 * The caller has already stamped the ACL header on the head skb.
 * Continuation fragments get their own headers here, with ACL_START
 * replaced by ACL_CONT, and the whole chain is appended atomically
 * under the queue lock so the TX worker never sees a partial packet.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as
		 * an independent skb below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All continuation fragments carry ACL_CONT, not ACL_START. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2135
/* Send ACL data on @chan.
 *
 * Stamps the head skb with device, packet type and ACL header, queues
 * it (plus fragments) on the channel's data queue and kicks the TX
 * worker, which performs the actual scheduling and transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2152
/* Send SCO data */
/* Prepends the SCO header (handle + length) to @skb, queues it on the
 * connection's data queue and kicks the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Build the header in front of the payload. */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2175
2176/* ---- HCI TX task (outgoing data) ---- */
2177
/* HCI Connection scheduler */
/* Pick the connection of @type with the fewest in-flight packets.
 *
 * Walks the connection hash under RCU and selects the connected (or
 * configuring) connection with pending data and the smallest ->sent
 * count, giving each eligible connection a fair share of the link
 * budget.  On return *quote holds the number of packets the chosen
 * connection may send (at least 1), or 0 when nothing is eligible.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Budget depends on the link type's controller buffers. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Fall back to ACL buffers if the controller has
			 * no dedicated LE buffers (le_mtu == 0). */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2237
/* Handle a TX timeout on links of @type.
 *
 * Walks the connection hash under RCU and disconnects (reason 0x13,
 * remote user terminated) every connection of that type that still has
 * unacknowledged packets, on the assumption the link has stalled.
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2258
/* Pick the best channel of link @type to transmit from.
 *
 * Two-level selection under RCU: among all connected (or configuring)
 * connections of @type, consider each channel's head-of-queue skb and
 * prefer (a) the highest skb->priority seen so far, then (b) within
 * that priority, the channel whose connection has the fewest in-flight
 * packets.  On success *quote receives the packet budget (>= 1) for
 * the chosen channel, derived from the link type's buffer count.
 * Returns NULL when no channel has pending data.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Ignore channels below the best priority found. */
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-sent connection wins at equal priority. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen; stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Budget depends on the link type's controller buffers. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2337
/* Anti-starvation pass after a scheduling round on links of @type.
 *
 * Channels that transmitted this round get their ->sent counter reset;
 * channels that did NOT transmit but still have queued data get their
 * head skb promoted to HCI_PRIO_MAX - 1 so they win the next round's
 * priority comparison in hci_chan_sent().  Runs under RCU.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got to send: just clear its count. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: bump it just below the maximum. */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type seen; stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2387
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002388static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2389{
2390 /* Calculate count of blocks used by this packet */
2391 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2392}
2393
/* If the ACL buffer budget (@cnt) is exhausted and nothing has been
 * transmitted for longer than HCI_ACL_TX_TIMEOUT, treat the ACL link
 * as stalled and disconnect its connections.  Skipped for raw devices.
 */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404
/* ACL scheduler for packet-based flow control.
 *
 * While ACL buffer credits remain, repeatedly picks the best channel
 * via hci_chan_sent() and drains up to its quota of packets, stopping
 * early within a channel if the head skb's priority drops below the
 * priority the quota was granted at.  Finishes with a priority
 * recalculation pass if anything was sent.
 */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed per packet. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2442
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002443static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2444{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002445 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002446 struct hci_chan *chan;
2447 struct sk_buff *skb;
2448 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002449
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002450 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002451
2452 while (hdev->block_cnt > 0 &&
2453 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2454 u32 priority = (skb_peek(&chan->data_q))->priority;
2455 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2456 int blocks;
2457
2458 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2459 skb->len, skb->priority);
2460
2461 /* Stop if priority has changed */
2462 if (skb->priority < priority)
2463 break;
2464
2465 skb = skb_dequeue(&chan->data_q);
2466
2467 blocks = __get_blocks(hdev, skb);
2468 if (blocks > hdev->block_cnt)
2469 return;
2470
2471 hci_conn_enter_active_mode(chan->conn,
2472 bt_cb(skb)->force_active);
2473
2474 hci_send_frame(skb);
2475 hdev->acl_last_tx = jiffies;
2476
2477 hdev->block_cnt -= blocks;
2478 quote -= blocks;
2479
2480 chan->sent += blocks;
2481 chan->conn->sent += blocks;
2482 }
2483 }
2484
2485 if (cnt != hdev->block_cnt)
2486 hci_prio_recalculate(hdev, ACL_LINK);
2487}
2488
2489static inline void hci_sched_acl(struct hci_dev *hdev)
2490{
2491 BT_DBG("%s", hdev->name);
2492
2493 if (!hci_conn_num(hdev, ACL_LINK))
2494 return;
2495
2496 switch (hdev->flow_ctl_mode) {
2497 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2498 hci_sched_acl_pkt(hdev);
2499 break;
2500
2501 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2502 hci_sched_acl_blk(hdev);
2503 break;
2504 }
2505}
2506
/* Schedule SCO */
/* Drains SCO connections round-robin via hci_low_sent() while SCO
 * buffer credits remain; each connection sends up to its quota.  The
 * per-connection ->sent counter wraps back to 0 at ~0.
 */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2530
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002531static inline void hci_sched_esco(struct hci_dev *hdev)
2532{
2533 struct hci_conn *conn;
2534 struct sk_buff *skb;
2535 int quote;
2536
2537 BT_DBG("%s", hdev->name);
2538
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002539 if (!hci_conn_num(hdev, ESCO_LINK))
2540 return;
2541
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002542 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2543 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2544 BT_DBG("skb %p len %d", skb, skb->len);
2545 hci_send_frame(skb);
2546
2547 conn->sent++;
2548 if (conn->sent == ~0)
2549 conn->sent = 0;
2550 }
2551 }
2552}
2553
/* Schedule LE data transmission.
 *
 * LE traffic uses a dedicated controller buffer pool (le_pkts/le_cnt)
 * when the controller reports one, otherwise it shares the ACL pool
 * (acl_cnt).  Channels are drained in priority order and the remaining
 * quota is written back to whichever pool it was taken from.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* le_pkts == 0 means no dedicated LE buffers: borrow the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting quota to detect whether we sent */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the unused quota back to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2604
/* TX work handler: runs each link-type scheduler in turn, then drains
 * any raw (unknown type) packets queued on raw_q straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
			hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2627
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002628/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629
/* ACL data packet: decode the handle/flags header, account the frame,
 * and hand it to L2CAP; unknown handles are logged and the skb freed.
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The header field carries both the connection handle and the
	 * boundary/broadcast flags; split them with hci_flags/hci_handle.
	 */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	/* Lookup only needs the lock; the conn pointer is used after unlock */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Inbound data: make sure the link leaves sniff/park mode */
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2664
2665/* SCO data packet */
2666static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2667{
2668 struct hci_sco_hdr *hdr = (void *) skb->data;
2669 struct hci_conn *conn;
2670 __u16 handle;
2671
2672 skb_pull(skb, HCI_SCO_HDR_SIZE);
2673
2674 handle = __le16_to_cpu(hdr->handle);
2675
2676 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2677
2678 hdev->stat.sco_rx++;
2679
2680 hci_dev_lock(hdev);
2681 conn = hci_conn_hash_lookup_handle(hdev, handle);
2682 hci_dev_unlock(hdev);
2683
2684 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002686 sco_recv_scodata(conn, skb);
2687 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002689 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 hdev->name, handle);
2691 }
2692
2693 kfree_skb(skb);
2694}
2695
/* RX work handler: drains every packet the driver queued on rx_q and
 * dispatches it by type (event, ACL data, SCO data).  Packets not
 * handed to a lower handler are freed here.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: userspace owns the device, drop everything */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: nothing to dispatch to */
			kfree_skb(skb);
			break;
		}
	}
}
2747
/* Command work handler: transmits the next queued HCI command when the
 * controller has a free command slot (cmd_cnt > 0).
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the previously retained command, if any */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the command can be referenced after send
		 * (e.g. when its completion event arrives).
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset the command timeout is suppressed,
			 * otherwise (re)arm it for this command.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002778
2779int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2780{
2781 /* General inquiry access code (GIAC) */
2782 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2783 struct hci_cp_inquiry cp;
2784
2785 BT_DBG("%s", hdev->name);
2786
2787 if (test_bit(HCI_INQUIRY, &hdev->flags))
2788 return -EINPROGRESS;
2789
Johan Hedberg46632622012-01-02 16:06:08 +02002790 inquiry_cache_flush(hdev);
2791
Andre Guedes2519a1f2011-11-07 11:45:24 -03002792 memset(&cp, 0, sizeof(cp));
2793 memcpy(&cp.lap, lap, sizeof(cp.lap));
2794 cp.length = length;
2795
2796 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2797}
Andre Guedes023d50492011-11-04 14:16:52 -03002798
2799int hci_cancel_inquiry(struct hci_dev *hdev)
2800{
2801 BT_DBG("%s", hdev->name);
2802
2803 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2804 return -EPERM;
2805
2806 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2807}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002808
/* Module parameter: toggle High Speed support (writable at runtime, 0644) */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");