blob: b687192306015a138b60ebb8c56a3adf026616c3 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020058int enable_hs;
59
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
Linus Torvalds1da177e2005-04-16 15:20:36 -070072/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080073static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
/* Add @nb to the global HCI notifier chain (atomic chain, safe from
 * non-process context). Returns 0 on success, negative errno otherwise. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove @nb from the global HCI notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast @event (HCI_DEV_UP/DOWN/...) for @hdev to all registered
 * HCI notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
Johan Hedberg23bb5762010-12-21 23:01:27 +020094void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
Johan Hedberg23bb5762010-12-21 23:01:27 +020096 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
Johan Hedberga5040ef2011-01-10 13:28:59 +020098 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200102 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
/* Execute request and wait for completion.
 *
 * Calls @req(hdev, opt) to queue the HCI commands, then sleeps
 * interruptibly until hci_req_complete()/hci_req_cancel() fires or
 * @timeout jiffies elapse. Caller must hold the request lock
 * (hci_req_lock); hci_request() is the locking wrapper.
 *
 * Returns 0 on success, a negative errno translated from the HCI
 * status on failure, -EINTR on signal, or -ETIMEDOUT.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue and mark the task
	 * interruptible *before* issuing the request, so a completion
	 * arriving immediately cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on -EINTR req_status is left as-is rather than
	 * reset below — presumably intentional so a late completion is
	 * still absorbed; confirm against hci_req_complete() callers. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied: map the HCI status to -errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* Canceled: req_result holds a positive errno. */
		err = -hdev->req_result;
		break;

	default:
		/* Neither completed nor canceled: timed out. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100166 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167{
168 int ret;
169
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
/* Request callback: flag the reset in progress and issue HCI_Reset.
 * @opt is unused (logged only). */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Queue the init command sequence for a BR/EDR controller: reset
 * (unless quirked away), local feature/version/buffer/address/class/
 * name/voice queries, then optional setup (clear event filters, set
 * connection accept timeout, delete stored link keys). Called from
 * hci_init_req() while HCI_INIT is set; completions are matched by
 * hci_req_complete() via init_last_cmd. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based (ACL) flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all link keys stored on the controller; the host keeps
	 * its own key store. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Queue the init command sequence for an AMP (alternate MAC/PHY)
 * controller: only a reset and a local version query. AMP controllers
 * use block-based flow control. */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback driving controller initialization.
 *
 * First drains any driver-supplied "special" command skbs from
 * hdev->driver_init onto the command queue (so they run before the
 * standard init commands), then dispatches to the dev_type-specific
 * init sequence. @opt is unused (logged only). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
/* Request callback: LE-specific init — query the LE buffer size
 * (LE ACL mtu and packet count). @opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
/* Request callback: write the scan-enable setting; @opt carries the
 * HCI Scan_Enable bitmask (inquiry/page scan). */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
304
/* Request callback: write the authentication-enable setting; @opt
 * carries the Auth_Enable parameter value. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
314
/* Request callback: write the encryption-mode setting; @opt carries
 * the Encryption_Enable parameter value. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
324
/* Request callback: write the default link policy; @opt is the policy
 * bitmask in host order, converted to little endian for the wire. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200358
359void hci_discovery_set_state(struct hci_dev *hdev, int state)
360{
361 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
362
363 if (hdev->discovery.state == state)
364 return;
365
366 switch (state) {
367 case DISCOVERY_STOPPED:
368 mgmt_discovering(hdev, 0);
369 break;
370 case DISCOVERY_STARTING:
371 break;
372 case DISCOVERY_ACTIVE:
373 mgmt_discovering(hdev, 1);
374 break;
375 case DISCOVERY_STOPPING:
376 break;
377 }
378
379 hdev->discovery.state = state;
380}
381
Linus Torvalds1da177e2005-04-16 15:20:36 -0700382static void inquiry_cache_flush(struct hci_dev *hdev)
383{
Johan Hedberg30883512012-01-04 14:16:21 +0200384 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200385 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386
Johan Hedberg561aafb2012-01-04 13:31:59 +0200387 list_for_each_entry_safe(p, n, &cache->all, all) {
388 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200389 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200391
392 INIT_LIST_HEAD(&cache->unknown);
393 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200394 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395}
396
397struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
398{
Johan Hedberg30883512012-01-04 14:16:21 +0200399 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400 struct inquiry_entry *e;
401
402 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
403
Johan Hedberg561aafb2012-01-04 13:31:59 +0200404 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200406 return e;
407 }
408
409 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410}
411
Johan Hedberg561aafb2012-01-04 13:31:59 +0200412struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
413 bdaddr_t *bdaddr)
414{
Johan Hedberg30883512012-01-04 14:16:21 +0200415 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200416 struct inquiry_entry *e;
417
418 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
419
420 list_for_each_entry(e, &cache->unknown, list) {
421 if (!bacmp(&e->data.bdaddr, bdaddr))
422 return e;
423 }
424
425 return NULL;
426}
427
Johan Hedberg31754052012-01-04 13:39:52 +0200428bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Johan Hedberg561aafb2012-01-04 13:31:59 +0200429 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700430{
Johan Hedberg30883512012-01-04 14:16:21 +0200431 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200432 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433
434 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
435
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200436 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200437 if (ie)
438 goto update;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200439
Johan Hedberg561aafb2012-01-04 13:31:59 +0200440 /* Entry not in the cache. Add new one. */
441 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
442 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200443 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200444
445 list_add(&ie->all, &cache->all);
446
447 if (name_known) {
448 ie->name_state = NAME_KNOWN;
449 } else {
450 ie->name_state = NAME_NOT_KNOWN;
451 list_add(&ie->list, &cache->unknown);
452 }
453
454update:
455 if (name_known && ie->name_state != NAME_KNOWN &&
456 ie->name_state != NAME_PENDING) {
457 ie->name_state = NAME_KNOWN;
458 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700459 }
460
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200461 memcpy(&ie->data, data, sizeof(*data));
462 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200464
465 if (ie->name_state == NAME_NOT_KNOWN)
466 return false;
467
468 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469}
470
471static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
472{
Johan Hedberg30883512012-01-04 14:16:21 +0200473 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700474 struct inquiry_info *info = (struct inquiry_info *) buf;
475 struct inquiry_entry *e;
476 int copied = 0;
477
Johan Hedberg561aafb2012-01-04 13:31:59 +0200478 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200480
481 if (copied >= num)
482 break;
483
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484 bacpy(&info->bdaddr, &data->bdaddr);
485 info->pscan_rep_mode = data->pscan_rep_mode;
486 info->pscan_period_mode = data->pscan_period_mode;
487 info->pscan_mode = data->pscan_mode;
488 memcpy(info->dev_class, data->dev_class, 3);
489 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200490
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200492 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493 }
494
495 BT_DBG("cache %p, copied %d", cache, copied);
496 return copied;
497}
498
/* Request callback: start an inquiry with the parameters from the
 * struct hci_inquiry_req passed through @opt. Does nothing if an
 * inquiry is already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
515
/* HCIINQUIRY ioctl backend.
 *
 * Copies a struct hci_inquiry_req from user space, optionally runs a
 * fresh inquiry (when the cache is stale, empty, or the caller asked
 * for a flush), then dumps the inquiry cache back to user space:
 * first the updated request header, then ir.num_rsp inquiry_info
 * records.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV,
 * -ENOMEM, or the request error). */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the device lock whether the cache is usable. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s-ish units; ~2s of jiffies per unit. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with the real num_rsp) followed by the
	 * result records. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
581
582/* ---- HCI ioctl helpers ---- */
583
/* HCIDEVUP ioctl backend: bring device @dev up.
 *
 * Rejects rfkill-blocked and already-up devices, marks quirked or
 * non-BR/EDR (when high-speed support is off) controllers as raw,
 * opens the driver and — for non-raw devices — runs the HCI init
 * sequence (plus the LE init sequence when supported). On success the
 * device is flagged up, a reference is held and userspace is notified;
 * on init failure all work/queues are drained and the driver closed.
 *
 * Returns 0 on success or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): the LE request result overwrites ret, so
		 * a bredr init failure followed by an LE init success
		 * would be masked — confirm whether that is intended. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP the power-on path announces the
		 * device itself; don't signal mgmt twice. */
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
670
/* Bring a device fully down.
 *
 * Cancels any pending synchronous request, flushes TX/RX work and
 * delayed works (discoverable timeout, auto power-off, service
 * cache), flushes the inquiry cache and connection hash, notifies
 * listeners, resets the controller (non-raw devices), drains all
 * queues and finally calls the driver's close(). The ordering is
 * deliberate: work items are flushed before their queues are purged,
 * and the command timer is stopped before the last sent command is
 * freed. Drops one device reference. Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: just make sure the cmd timer is dead. */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		/* Short 250ms timeout: best-effort reset on the way down. */
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
750
751int hci_dev_close(__u16 dev)
752{
753 struct hci_dev *hdev;
754 int err;
755
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200756 hdev = hci_dev_get(dev);
757 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700758 return -ENODEV;
759 err = hci_dev_do_close(hdev);
760 hci_dev_put(hdev);
761 return err;
762}
763
/* HCIDEVRESET ioctl backend: soft-reset device @dev.
 *
 * Drops the RX/command queues, flushes the inquiry cache and
 * connection hash, resets the data-packet counters and (for non-raw
 * devices) issues an HCI_Reset. The whole operation runs under the
 * request lock. Returns 0, -ENODEV, or the reset request's error. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one command in flight again and zero the in-flight
	 * ACL/SCO/LE packet counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
802
803int hci_dev_reset_stat(__u16 dev)
804{
805 struct hci_dev *hdev;
806 int ret = 0;
807
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200808 hdev = hci_dev_get(dev);
809 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810 return -ENODEV;
811
812 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
813
814 hci_dev_put(hdev);
815
816 return ret;
817}
818
/* Handle the HCISET* device-control ioctls.  arg points to a struct
 * hci_dev_req in user space; dr.dev_id selects the device and
 * dr.dev_opt carries the per-command parameter.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values; the halves are taken
		 * by address, so the split is host-endian by design of
		 * this legacy ioctl ABI */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
893
/* HCIGETDEVLIST ioctl helper: report the id and flags of up to
 * dev_num registered devices to user space.
 * arg points to a struct hci_dev_list_req; its dev_num field (read
 * first via get_user) caps the number of entries and is rewritten
 * with the actual count on return.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel bounce buffer stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace is looking at the device: abort any pending
		 * automatic power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects devices pairable */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
940
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for the
 * device selected by di.dev_id and copy it back to user space.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace is looking at the device: abort any pending
	 * automatic power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects devices pairable */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
982
983/* ---- Interface to HCI drivers ---- */
984
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200985static int hci_rfkill_set_block(void *data, bool blocked)
986{
987 struct hci_dev *hdev = data;
988
989 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
990
991 if (!blocked)
992 return 0;
993
994 hci_dev_do_close(hdev);
995
996 return 0;
997}
998
/* rfkill callbacks: only blocking is handled, by closing the device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1002
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003/* Alloc HCI device */
1004struct hci_dev *hci_alloc_dev(void)
1005{
1006 struct hci_dev *hdev;
1007
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001008 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009 if (!hdev)
1010 return NULL;
1011
David Herrmann0ac7e702011-10-08 14:58:47 +02001012 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013 skb_queue_head_init(&hdev->driver_init);
1014
1015 return hdev;
1016}
1017EXPORT_SYMBOL(hci_alloc_dev);
1018
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver-queued init frames before the last reference */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1028
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001029static void hci_power_on(struct work_struct *work)
1030{
1031 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1032
1033 BT_DBG("%s", hdev->name);
1034
1035 if (hci_dev_open(hdev->id) < 0)
1036 return;
1037
1038 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
Gustavo F. Padovan80b7ab32011-12-17 14:52:27 -02001039 schedule_delayed_work(&hdev->power_off,
Johan Hedberg32435532011-11-07 22:16:04 +02001040 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001041
1042 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001043 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001044}
1045
/* Delayed power-off work item, armed by hci_power_on when the device
 * was opened automatically and nothing claimed it in time.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The automatic window is over either way */
	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
1057
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001058static void hci_discov_off(struct work_struct *work)
1059{
1060 struct hci_dev *hdev;
1061 u8 scan = SCAN_PAGE;
1062
1063 hdev = container_of(work, struct hci_dev, discov_off.work);
1064
1065 BT_DBG("%s", hdev->name);
1066
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001067 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001068
1069 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1070
1071 hdev->discov_timeout = 0;
1072
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001073 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001074}
1075
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001076int hci_uuids_clear(struct hci_dev *hdev)
1077{
1078 struct list_head *p, *n;
1079
1080 list_for_each_safe(p, n, &hdev->uuids) {
1081 struct bt_uuid *uuid;
1082
1083 uuid = list_entry(p, struct bt_uuid, list);
1084
1085 list_del(p);
1086 kfree(uuid);
1087 }
1088
1089 return 0;
1090}
1091
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001092int hci_link_keys_clear(struct hci_dev *hdev)
1093{
1094 struct list_head *p, *n;
1095
1096 list_for_each_safe(p, n, &hdev->link_keys) {
1097 struct link_key *key;
1098
1099 key = list_entry(p, struct link_key, list);
1100
1101 list_del(p);
1102 kfree(key);
1103 }
1104
1105 return 0;
1106}
1107
1108struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1109{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001110 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001111
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001112 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001113 if (bacmp(bdaddr, &k->bdaddr) == 0)
1114 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001115
1116 return NULL;
1117}
1118
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001119static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1120 u8 key_type, u8 old_key_type)
1121{
1122 /* Legacy key */
1123 if (key_type < 0x03)
1124 return 1;
1125
1126 /* Debug keys are insecure so don't store them persistently */
1127 if (key_type == HCI_LK_DEBUG_COMBINATION)
1128 return 0;
1129
1130 /* Changed combination key and there's no previous one */
1131 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1132 return 0;
1133
1134 /* Security mode 3 case */
1135 if (!conn)
1136 return 1;
1137
1138 /* Neither local nor remote side had no-bonding as requirement */
1139 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1140 return 1;
1141
1142 /* Local side had dedicated bonding as requirement */
1143 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1144 return 1;
1145
1146 /* Remote side had dedicated bonding as requirement */
1147 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1148 return 1;
1149
1150 /* If none of the above criteria match, then don't store the key
1151 * persistently */
1152 return 0;
1153}
1154
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001155struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1156{
1157 struct link_key *k;
1158
1159 list_for_each_entry(k, &hdev->link_keys, list) {
1160 struct key_master_id *id;
1161
1162 if (k->type != HCI_LK_SMP_LTK)
1163 continue;
1164
1165 if (k->dlen != sizeof(*id))
1166 continue;
1167
1168 id = (void *) &k->data;
1169 if (id->ediv == ediv &&
1170 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1171 return k;
1172 }
1173
1174 return NULL;
1175}
1176EXPORT_SYMBOL(hci_find_ltk);
1177
1178struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1179 bdaddr_t *bdaddr, u8 type)
1180{
1181 struct link_key *k;
1182
1183 list_for_each_entry(k, &hdev->link_keys, list)
1184 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1185 return k;
1186
1187 return NULL;
1188}
1189EXPORT_SYMBOL(hci_find_link_key_type);
1190
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001191int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1192 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001193{
1194 struct link_key *key, *old_key;
Johan Hedberg4df378a2011-04-28 11:29:03 -07001195 u8 old_key_type, persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001196
1197 old_key = hci_find_link_key(hdev, bdaddr);
1198 if (old_key) {
1199 old_key_type = old_key->type;
1200 key = old_key;
1201 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001202 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001203 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1204 if (!key)
1205 return -ENOMEM;
1206 list_add(&key->list, &hdev->link_keys);
1207 }
1208
1209 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1210
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001211 /* Some buggy controller combinations generate a changed
1212 * combination key for legacy pairing even when there's no
1213 * previous key */
1214 if (type == HCI_LK_CHANGED_COMBINATION &&
1215 (!conn || conn->remote_auth == 0xff) &&
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001216 old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001217 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001218 if (conn)
1219 conn->key_type = type;
1220 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001221
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001222 bacpy(&key->bdaddr, bdaddr);
1223 memcpy(key->val, val, 16);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001224 key->pin_len = pin_len;
1225
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001226 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001227 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001228 else
1229 key->type = type;
1230
Johan Hedberg4df378a2011-04-28 11:29:03 -07001231 if (!new_key)
1232 return 0;
1233
1234 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1235
Johan Hedberg744cf192011-11-08 20:40:14 +02001236 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001237
1238 if (!persistent) {
1239 list_del(&key->list);
1240 kfree(key);
1241 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001242
1243 return 0;
1244}
1245
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001246int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
Vinicius Costa Gomes726b4ff2011-07-08 18:31:45 -03001247 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001248{
1249 struct link_key *key, *old_key;
1250 struct key_master_id *id;
1251 u8 old_key_type;
1252
1253 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1254
1255 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1256 if (old_key) {
1257 key = old_key;
1258 old_key_type = old_key->type;
1259 } else {
1260 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1261 if (!key)
1262 return -ENOMEM;
1263 list_add(&key->list, &hdev->link_keys);
1264 old_key_type = 0xff;
1265 }
1266
1267 key->dlen = sizeof(*id);
1268
1269 bacpy(&key->bdaddr, bdaddr);
1270 memcpy(key->val, ltk, sizeof(key->val));
1271 key->type = HCI_LK_SMP_LTK;
Vinicius Costa Gomes726b4ff2011-07-08 18:31:45 -03001272 key->pin_len = key_size;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001273
1274 id = (void *) &key->data;
1275 id->ediv = ediv;
1276 memcpy(id->rand, rand, sizeof(id->rand));
1277
1278 if (new_key)
Johan Hedberg744cf192011-11-08 20:40:14 +02001279 mgmt_new_link_key(hdev, key, old_key_type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001280
1281 return 0;
1282}
1283
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001284int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1285{
1286 struct link_key *key;
1287
1288 key = hci_find_link_key(hdev, bdaddr);
1289 if (!key)
1290 return -ENOENT;
1291
1292 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1293
1294 list_del(&key->list);
1295 kfree(key);
1296
1297 return 0;
1298}
1299
Ville Tervo6bd32322011-02-16 16:32:41 +02001300/* HCI command timer function */
1301static void hci_cmd_timer(unsigned long arg)
1302{
1303 struct hci_dev *hdev = (void *) arg;
1304
1305 BT_ERR("%s command tx timeout", hdev->name);
1306 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001307 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001308}
1309
Szymon Janc2763eda2011-03-22 13:12:22 +01001310struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1311 bdaddr_t *bdaddr)
1312{
1313 struct oob_data *data;
1314
1315 list_for_each_entry(data, &hdev->remote_oob_data, list)
1316 if (bacmp(bdaddr, &data->bdaddr) == 0)
1317 return data;
1318
1319 return NULL;
1320}
1321
1322int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1323{
1324 struct oob_data *data;
1325
1326 data = hci_find_remote_oob_data(hdev, bdaddr);
1327 if (!data)
1328 return -ENOENT;
1329
1330 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1331
1332 list_del(&data->list);
1333 kfree(data);
1334
1335 return 0;
1336}
1337
/* Remove and free every cached remote OOB data entry.  Always
 * returns 0.  Callers in this file invoke it under hci_dev_lock.
 */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1349
/* Cache remote OOB pairing data (hash + randomizer) for bdaddr,
 * overwriting any previous entry for the same address.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* GFP_ATOMIC -- presumably reachable from non-sleeping
		 * context, matching the key-store helpers above; every
		 * field is assigned below so kmalloc is sufficient */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1373
Antti Julkub2a66aa2011-06-15 12:01:14 +03001374struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1375 bdaddr_t *bdaddr)
1376{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001377 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001378
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001379 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001380 if (bacmp(bdaddr, &b->bdaddr) == 0)
1381 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001382
1383 return NULL;
1384}
1385
1386int hci_blacklist_clear(struct hci_dev *hdev)
1387{
1388 struct list_head *p, *n;
1389
1390 list_for_each_safe(p, n, &hdev->blacklist) {
1391 struct bdaddr_list *b;
1392
1393 b = list_entry(p, struct bdaddr_list, list);
1394
1395 list_del(p);
1396 kfree(b);
1397 }
1398
1399 return 0;
1400}
1401
/* Add bdaddr to the device's blacklist.
 * Returns -EBADF for the wildcard address, -EEXIST for duplicates,
 * -ENOMEM on allocation failure, otherwise the result of notifying
 * the management interface.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blacklisted */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1422
/* Remove bdaddr from the blacklist; the wildcard address clears the
 * whole list instead.  Returns -ENOENT when the address was not
 * blocked, otherwise the result of notifying the management
 * interface (or of the clear).
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1439
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001440static void hci_clear_adv_cache(struct work_struct *work)
Andre Guedes35815082011-05-26 16:23:53 -03001441{
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001442 struct hci_dev *hdev = container_of(work, struct hci_dev,
1443 adv_work.work);
Andre Guedes35815082011-05-26 16:23:53 -03001444
1445 hci_dev_lock(hdev);
1446
1447 hci_adv_entries_clear(hdev);
1448
1449 hci_dev_unlock(hdev);
1450}
1451
Andre Guedes76c86862011-05-26 16:23:50 -03001452int hci_adv_entries_clear(struct hci_dev *hdev)
1453{
1454 struct adv_entry *entry, *tmp;
1455
1456 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1457 list_del(&entry->list);
1458 kfree(entry);
1459 }
1460
1461 BT_DBG("%s adv cache cleared", hdev->name);
1462
1463 return 0;
1464}
1465
1466struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1467{
1468 struct adv_entry *entry;
1469
1470 list_for_each_entry(entry, &hdev->adv_entries, list)
1471 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1472 return entry;
1473
1474 return NULL;
1475}
1476
1477static inline int is_connectable_adv(u8 evt_type)
1478{
1479 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1480 return 1;
1481
1482 return 0;
1483}
1484
/* Cache a connectable LE advertising report (address + address type).
 * Returns 0 on success (including the already-cached case), -EINVAL
 * for non-connectable events, or -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1512
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513/* Register HCI device */
1514int hci_register_dev(struct hci_dev *hdev)
1515{
1516 struct list_head *head = &hci_dev_list, *p;
Mat Martineau08add512011-11-02 16:18:36 -07001517 int i, id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
Marcel Holtmannc13854c2010-02-08 15:27:07 +01001519 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1520 hdev->bus, hdev->owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521
1522 if (!hdev->open || !hdev->close || !hdev->destruct)
1523 return -EINVAL;
1524
Mat Martineau08add512011-11-02 16:18:36 -07001525 /* Do not allow HCI_AMP devices to register at index 0,
1526 * so the index can be used as the AMP controller ID.
1527 */
1528 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1529
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001530 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531
1532 /* Find first available device id */
1533 list_for_each(p, &hci_dev_list) {
1534 if (list_entry(p, struct hci_dev, list)->id != id)
1535 break;
1536 head = p; id++;
1537 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001538
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 sprintf(hdev->name, "hci%d", id);
1540 hdev->id = id;
Andrei Emeltchenkoc6feeb22011-11-16 17:30:20 +02001541 list_add_tail(&hdev->list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
1543 atomic_set(&hdev->refcnt, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001544 mutex_init(&hdev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545
1546 hdev->flags = 0;
Andre Guedesd23264a2011-11-25 20:53:38 -03001547 hdev->dev_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02001549 hdev->esco_type = (ESCO_HV1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 hdev->link_mode = (HCI_LM_ACCEPT);
Johan Hedberg17fa4b92011-01-25 13:28:33 +02001551 hdev->io_capability = 0x03; /* No Input No Output */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552
Marcel Holtmann04837f62006-07-03 10:02:33 +02001553 hdev->idle_timeout = 0;
1554 hdev->sniff_max_interval = 800;
1555 hdev->sniff_min_interval = 80;
1556
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001557 INIT_WORK(&hdev->rx_work, hci_rx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001558 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001559 INIT_WORK(&hdev->tx_work, hci_tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001560
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
1562 skb_queue_head_init(&hdev->rx_q);
1563 skb_queue_head_init(&hdev->cmd_q);
1564 skb_queue_head_init(&hdev->raw_q);
1565
Ville Tervo6bd32322011-02-16 16:32:41 +02001566 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1567
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301568 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001569 hdev->reassembly[i] = NULL;
1570
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 init_waitqueue_head(&hdev->req_wait_q);
Thomas Gleixnera6a67ef2009-07-26 08:18:19 +00001572 mutex_init(&hdev->req_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
Johan Hedberg30883512012-01-04 14:16:21 +02001574 discovery_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575
1576 hci_conn_hash_init(hdev);
1577
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001578 INIT_LIST_HEAD(&hdev->mgmt_pending);
1579
David Millerea4bd8b2010-07-30 21:54:49 -07001580 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedbergf0358562010-05-18 13:20:32 +02001581
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001582 INIT_LIST_HEAD(&hdev->uuids);
1583
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001584 INIT_LIST_HEAD(&hdev->link_keys);
1585
Szymon Janc2763eda2011-03-22 13:12:22 +01001586 INIT_LIST_HEAD(&hdev->remote_oob_data);
1587
Andre Guedes76c86862011-05-26 16:23:50 -03001588 INIT_LIST_HEAD(&hdev->adv_entries);
1589
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001590 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001591 INIT_WORK(&hdev->power_on, hci_power_on);
Johan Hedberg32435532011-11-07 22:16:04 +02001592 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001593
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001594 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1595
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1597
1598 atomic_set(&hdev->promisc, 0);
1599
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001600 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001602 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1603 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001604 if (!hdev->workqueue) {
1605 error = -ENOMEM;
1606 goto err;
1607 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001608
David Herrmann33ca9542011-10-08 14:58:49 +02001609 error = hci_add_sysfs(hdev);
1610 if (error < 0)
1611 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001613 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1614 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1615 if (hdev->rfkill) {
1616 if (rfkill_register(hdev->rfkill) < 0) {
1617 rfkill_destroy(hdev->rfkill);
1618 hdev->rfkill = NULL;
1619 }
1620 }
1621
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001622 set_bit(HCI_AUTO_OFF, &hdev->flags);
1623 set_bit(HCI_SETUP, &hdev->flags);
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001624 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001625
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 hci_notify(hdev, HCI_DEV_REG);
1627
1628 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001629
David Herrmann33ca9542011-10-08 14:58:49 +02001630err_wqueue:
1631 destroy_workqueue(hdev->workqueue);
1632err:
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001633 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001634 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001635 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001636
David Herrmann33ca9542011-10-08 14:58:49 +02001637 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638}
1639EXPORT_SYMBOL(hci_register_dev);
1640
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled frames from the driver */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Announce removal to mgmt, unless the device never finished
	 * setup (then its index was never announced) or is mid-init */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Drop all cached per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1692
/* Suspend HCI device.
 *
 * Only broadcasts HCI_DEV_SUSPEND to registered notifiers; no device
 * state is modified here. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1700
/* Resume HCI device.
 *
 * Counterpart of hci_suspend_dev(): broadcasts HCI_DEV_RESUME to the
 * registered notifiers and returns 0 unconditionally.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1708
/* Receive frame from HCI drivers.
 *
 * Takes ownership of @skb. Frames are dropped (-ENXIO) unless the device
 * exists and is either up or still initializing; otherwise the skb is
 * timestamped, queued on rx_q and handed to the rx work for processing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1731
/* Reassemble a possibly fragmented HCI packet.
 *
 * Accumulates up to @count bytes from @data into the partial packet kept
 * in hdev->reassembly[@index]. A complete packet is passed to
 * hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), -EILSEQ for an
 * invalid @type/@index, or -ENOMEM on allocation failure or when the
 * advertised payload would not fit the preallocated buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No partial packet pending for this slot: allocate a
		 * buffer sized for the largest packet of this type and
		 * start by expecting the fixed-size header. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most the number of bytes still expected for the
		 * current header or payload. */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once exactly the header has been received, read the
		 * payload length from it and expect that many more bytes.
		 * Bail out if the payload cannot fit the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: hand it off and clear the slot */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1840
Marcel Holtmannef222012007-07-11 06:42:04 +02001841int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1842{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301843 int rem = 0;
1844
Marcel Holtmannef222012007-07-11 06:42:04 +02001845 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1846 return -EILSEQ;
1847
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001848 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001849 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301850 if (rem < 0)
1851 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001852
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301853 data += (count - rem);
1854 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001855 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001856
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301857 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001858}
1859EXPORT_SYMBOL(hci_recv_fragment);
1860
#define STREAM_REASSEMBLY 0

/* Feed a byte stream (packet type indicator followed by the packet) into
 * the shared stream reassembly slot. Used by drivers that deliver raw
 * byte streams rather than typed packets.
 *
 * Returns a negative error from hci_reassembly(), or the number of bytes
 * left unconsumed.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of a frame: the first stream byte is the
			 * packet type indicator */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Skip the consumed bytes and continue with the rest */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1895
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896/* ---- Interface to upper protocols ---- */
1897
/* Register an HCI callback structure on the global hci_cb_list.
 * Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1909
/* Remove a previously registered HCI callback structure from the global
 * hci_cb_list. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1921
/* Hand a fully built frame to the driver's send callback.
 *
 * @skb must have skb->dev pointing at the hci_dev (or it is freed and
 * -ENODEV returned). When the device is in promiscuous mode a copy is
 * first delivered to the monitoring sockets. Returns the driver's
 * send() result.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1945
/* Send HCI command.
 *
 * Builds an HCI command packet (@opcode plus @plen bytes of @param) and
 * queues it on cmd_q for the cmd work to transmit. Allocation uses
 * GFP_ATOMIC, so this is callable from atomic context.
 * Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during init so the init
	 * sequence can track completion. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
1982/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001983void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984{
1985 struct hci_command_hdr *hdr;
1986
1987 if (!hdev->sent_cmd)
1988 return NULL;
1989
1990 hdr = (void *) hdev->sent_cmd->data;
1991
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001992 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 return NULL;
1994
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001995 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
1997 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1998}
1999
2000/* Send ACL data */
2001static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2002{
2003 struct hci_acl_hdr *hdr;
2004 int len = skb->len;
2005
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002006 skb_push(skb, HCI_ACL_HDR_SIZE);
2007 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002008 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002009 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2010 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011}
2012
/* Queue an ACL skb (and any fragments hanging off its frag_list) on
 * @queue. Fragments get their own ACL headers with ACL_CONT set and are
 * spliced in atomically under the queue lock so the fragment sequence
 * stays contiguous.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2053
/* Send ACL data on @chan: stamp the skb with an ACL header, queue it
 * (with fragments) on the channel's data queue and kick the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2070
/* Send SCO data.
 *
 * Prepends a SCO header (handle and payload length) to @skb, queues it
 * on the connection's data queue and schedules the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2093
2094/* ---- HCI TX task (outgoing data) ---- */
2095
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * outstanding packets (fairness by least-sent). On success *quote is set
 * to this connection's share of the available controller buffers (at
 * least 1); with no eligible connection, *quote is 0 and NULL returned.
 * The connection list is walked under RCU.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available buffer credits depend on link type; LE may
		 * share the ACL buffer pool when le_mtu is 0 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2155
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets (reason 0x13, remote user terminated).
 * Walks the connection list under RCU. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2176
/* Channel-level scheduler: among all channels of connections of @type,
 * pick one whose head skb has the highest priority, breaking ties by the
 * connection with the fewest outstanding packets. On success *quote is
 * set to the buffer-credit share (at least 1). Returns NULL when nothing
 * is queued. Lists are walked under RCU.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Available buffer credits depend on link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2255
/* Anti-starvation pass after a TX round: for channels of @type that sent
 * nothing this round (sent == 0) but still have queued data, promote the
 * head skb to HCI_PRIO_MAX - 1. Channels that did send have their
 * per-round counter reset instead. Lists are walked under RCU.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got service; just reset its
			 * per-round counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2305
/* Schedule ACL transmissions: detect stalled links, then drain channels
 * chosen by hci_chan_sent() while controller buffer credits (acl_cnt)
 * remain, stopping a channel early if its head priority drops. Runs a
 * priority recalculation if anything was sent. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2355
/* Schedule SCO */
/* Drain SCO connections while sco_cnt credits remain, sending each
 * connection up to its fair quote per round. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the per-connection sent counter */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2379
/* Drain eSCO connections; mirrors hci_sched_sco() but selects ESCO_LINK.
 * NOTE(review): uses the same sco_cnt credit pool as SCO — presumably
 * SCO and eSCO share controller buffers; confirm against the buffer
 * accounting in the event handlers. */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the per-connection sent counter */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2402
/* Schedule LE transmissions: detect stalled LE links, then drain
 * channels while credits remain. Controllers without dedicated LE
 * buffers (le_pkts == 0) borrow from the ACL credit pool, so the final
 * count is written back to le_cnt or acl_cnt accordingly. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2453
/* TX work handler: run the per-link-type schedulers in fixed order
 * (ACL, SCO, eSCO, LE), then flush any raw/unknown-type packets
 * directly to the driver. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2476
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002477/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478
/* ACL data packet */
/* Strip the ACL header, look up the connection for the handle and pass
 * the payload to L2CAP; unknown handles are logged and the skb freed. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both handle and packet boundary flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2513
/* SCO data packet */
/* Strip the SCO header, look up the connection for the handle and pass
 * the payload to the SCO layer; unknown handles are logged and the skb
 * freed. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2544
/* RX work handler: drain rx_q, mirroring frames to promiscuous sockets,
 * discarding everything in raw mode, dropping data packets during init,
 * and otherwise dispatching by packet type to the event/ACL/SCO
 * handlers. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events are still needed to finish init. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2596
/* TX command work handler: transmit the next queued HCI command, but
 * only while the controller has command credit (cmd_cnt > 0). A copy of
 * the in-flight command is kept in hdev->sent_cmd for the completion
 * handler to match against. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the previous in-flight command, if any
		 * (kfree_skb on NULL is a no-op) */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			/* Consume one credit, then hand the frame to the
			 * driver */
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				/* During HCI_Reset the command timer is
				 * stopped rather than re-armed */
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (no memory): put the command back
			 * at the head of the queue and reschedule */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002627
2628int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2629{
2630 /* General inquiry access code (GIAC) */
2631 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2632 struct hci_cp_inquiry cp;
2633
2634 BT_DBG("%s", hdev->name);
2635
2636 if (test_bit(HCI_INQUIRY, &hdev->flags))
2637 return -EINPROGRESS;
2638
Johan Hedberg46632622012-01-02 16:06:08 +02002639 inquiry_cache_flush(hdev);
2640
Andre Guedes2519a1f2011-11-07 11:45:24 -03002641 memset(&cp, 0, sizeof(cp));
2642 memcpy(&cp.lap, lap, sizeof(cp.lap));
2643 cp.length = length;
2644
2645 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2646}
Andre Guedes023d50492011-11-04 14:16:52 -03002647
2648int hci_cancel_inquiry(struct hci_dev *hdev)
2649{
2650 BT_DBG("%s", hdev->name);
2651
2652 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2653 return -EPERM;
2654
2655 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2656}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002657
/* Expose the High Speed (AMP) toggle as a writable module parameter;
 * enable_hs itself is presumably defined earlier in this file — outside
 * the visible chunk */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");