blob: 29a9b01c3b9ba505f2d38340968275812198a322 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Fabio Estevam8b281b92012-01-10 18:33:50 -020058bool enable_hs;
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020059
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
Linus Torvalds1da177e2005-04-16 15:20:36 -070072/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080073static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
/* Register a callback on the global HCI notifier chain; the callback is
 * invoked by hci_notify() for device-level events (up/down/reg/unreg). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove a callback previously added with hci_register_notifier(). */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Fan an HCI device event out to all registered notifier callbacks.
 * Atomic chain: safe to call from non-sleeping context. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
/* Complete a pending synchronous HCI request: record the result and wake
 * the waiter sleeping in __hci_request().
 *
 * @cmd:    opcode of the command that just completed
 * @result: HCI status of that command
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return. This avoids waking
	 * the init waiter for intermediate commands of the init sequence.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Only wake up if someone is actually waiting on this request */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which issues one or more HCI commands) and sleeps until
 * hci_req_complete()/hci_req_cancel() wakes us or @timeout jiffies pass.
 * Caller must hold the request lock (see hci_request()).
 *
 * Returns 0 on success, a negative errno mapped from the HCI status on
 * completion, -EINTR if interrupted by a signal, -ETIMEDOUT on timeout.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves BEFORE issuing the request so a fast completion
	 * cannot be missed between req() and schedule_timeout(). */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on -EINTR req_status is left as-is; presumably a
	 * later completion is ignored or cancelled elsewhere — confirm. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status byte into a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by hci_req_cancel() */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100166 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167{
168 int ret;
169
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
/* Request callback: issue HCI_Reset. HCI_RESET in flags marks a reset in
 * flight so the event handler can clear it on completion. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Queue the BR/EDR controller init command sequence (run during HCI_INIT;
 * completions are tracked via hci_req_complete()). Order follows the
 * mandatory-then-optional structure below. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset — skipped for controllers that cannot tolerate it */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all link keys stored on the controller */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Queue the minimal init sequence for an AMP controller. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback for device init: flush any driver-provided "special"
 * commands to the command queue first, then run the type-specific init
 * sequence (BR/EDR or AMP). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before the device came up */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
/* Request callback for LE init: query the LE ACL buffer parameters. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
/* Request callback: set the scan enable mode (opt carries the raw
 * HCI Write_Scan_Enable value for inquiry/page scan). */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
304
/* Request callback: enable/disable authentication (opt is the raw
 * Write_Authentication_Enable parameter). */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
314
/* Request callback: enable/disable link-level encryption (opt is the raw
 * Write_Encryption_Mode parameter). */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
324
/* Request callback: set the default link policy (opt is the host-order
 * policy bitmask, converted to little endian for the wire). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200358
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
Andre Guedes6fbe1952012-02-03 17:47:58 -0300363 switch (discov->state) {
364 case DISCOVERY_INQUIRY:
365 case DISCOVERY_LE_SCAN:
366 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200367 return true;
368
Andre Guedes6fbe1952012-02-03 17:47:58 -0300369 default:
370 return false;
371 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200372}
373
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the externally visible edges (started/stopped). */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op transitions generate no events */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hdev->discovery.type = 0;

		/* Don't report "stopped" if discovery never actually
		 * started (we were still in STARTING) */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_INQUIRY:
	case DISCOVERY_LE_SCAN:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
402
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403static void inquiry_cache_flush(struct hci_dev *hdev)
404{
Johan Hedberg30883512012-01-04 14:16:21 +0200405 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200406 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407
Johan Hedberg561aafb2012-01-04 13:31:59 +0200408 list_for_each_entry_safe(p, n, &cache->all, all) {
409 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200410 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200412
413 INIT_LIST_HEAD(&cache->unknown);
414 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200415 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416}
417
/* Look up an inquiry cache entry by Bluetooth address across all entries.
 * Returns the entry or NULL if the address is not cached. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
432
/* Look up an entry by address on the "unknown name" list only.
 * Returns the entry or NULL. */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
448
/* Look up an entry on the name-resolve list. With BDADDR_ANY, return the
 * first entry whose name_state equals @state; otherwise match by address.
 * Returns the entry or NULL. */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		/* Wildcard address: match on name_state instead */
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
467
/* Re-insert @ie into the resolve list keeping it sorted by descending
 * RSSI magnitude, but never ahead of entries whose name resolution is
 * already pending. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last entry we must stay behind; stop at the first
	 * non-pending entry with weaker (larger-magnitude) RSSI */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
486
/* Insert or refresh an inquiry result in the cache.
 *
 * @data:       inquiry result to store (copied into the entry)
 * @name_known: whether the remote name is already known to the caller
 *
 * Returns true if the entry's name is known after the update, false if
 * the name is still unknown or allocation of a new entry failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Existing entry awaiting resolution: a changed RSSI
		 * may alter its position in the resolve ordering */
		if (ie->name_state == NAME_NEEDED &&
				data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. GFP_ATOMIC: presumably
	 * callable from non-sleeping event context — failure is tolerated
	 * by reporting the name as unknown. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known: take the entry off the unknown/resolve list
	 * unless a resolution request is already pending for it */
	if (name_known && ie->name_state != NAME_KNOWN &&
			ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
536
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Must not
 * sleep (see comment in hci_inquiry()). */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
564
565static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
566{
567 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
568 struct hci_cp_inquiry cp;
569
570 BT_DBG("%s", hdev->name);
571
572 if (test_bit(HCI_INQUIRY, &hdev->flags))
573 return;
574
575 /* Start Inquiry */
576 memcpy(&cp.lap, &ir->lap, 3);
577 cp.length = ir->length;
578 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200579 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580}
581
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry (when the
 * cache is stale/empty or a flush is requested), then copy the cached
 * results back to user space following the request structure.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the lock whether the cache must be refreshed */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28s per spec; 2000ms is a safe bound */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the result array */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
647
648/* ---- HCI ioctl helpers ---- */
649
650int hci_dev_open(__u16 dev)
651{
652 struct hci_dev *hdev;
653 int ret = 0;
654
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200655 hdev = hci_dev_get(dev);
656 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657 return -ENODEV;
658
659 BT_DBG("%s %p", hdev->name, hdev);
660
661 hci_req_lock(hdev);
662
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200663 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
664 ret = -ERFKILL;
665 goto done;
666 }
667
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668 if (test_bit(HCI_UP, &hdev->flags)) {
669 ret = -EALREADY;
670 goto done;
671 }
672
673 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
674 set_bit(HCI_RAW, &hdev->flags);
675
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200676 /* Treat all non BR/EDR controllers as raw devices if
677 enable_hs is not set */
678 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100679 set_bit(HCI_RAW, &hdev->flags);
680
Linus Torvalds1da177e2005-04-16 15:20:36 -0700681 if (hdev->open(hdev)) {
682 ret = -EIO;
683 goto done;
684 }
685
686 if (!test_bit(HCI_RAW, &hdev->flags)) {
687 atomic_set(&hdev->cmd_cnt, 1);
688 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200689 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690
Marcel Holtmann04837f62006-07-03 10:02:33 +0200691 ret = __hci_request(hdev, hci_init_req, 0,
692 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693
Andre Guedeseead27d2011-06-30 19:20:55 -0300694 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300695 ret = __hci_request(hdev, hci_le_init_req, 0,
696 msecs_to_jiffies(HCI_INIT_TIMEOUT));
697
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698 clear_bit(HCI_INIT, &hdev->flags);
699 }
700
701 if (!ret) {
702 hci_dev_hold(hdev);
703 set_bit(HCI_UP, &hdev->flags);
704 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200705 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300706 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200707 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300708 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200709 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900710 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200712 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200713 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400714 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715
716 skb_queue_purge(&hdev->cmd_q);
717 skb_queue_purge(&hdev->rx_q);
718
719 if (hdev->flush)
720 hdev->flush(hdev);
721
722 if (hdev->sent_cmd) {
723 kfree_skb(hdev->sent_cmd);
724 hdev->sent_cmd = NULL;
725 }
726
727 hdev->close(hdev);
728 hdev->flags = 0;
729 }
730
731done:
732 hci_req_unlock(hdev);
733 hci_dev_put(hdev);
734 return ret;
735}
736
/* Bring an HCI device down: cancel outstanding work, flush queues,
 * optionally reset the controller, and close the driver. Teardown order
 * matters — works are flushed before queues are purged, and the command
 * timer is stopped before the last sent command is freed.
 * Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just make sure the command timer is stopped */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device — only for controllers that were NOT reset at init
	 * time (HCI_QUIRK_NO_RESET) and are not raw */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device came up */
	hci_dev_put(hdev);
	return 0;
}
821
822int hci_dev_close(__u16 dev)
823{
824 struct hci_dev *hdev;
825 int err;
826
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200827 hdev = hci_dev_get(dev);
828 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829 return -ENODEV;
830 err = hci_dev_do_close(hdev);
831 hci_dev_put(hdev);
832 return err;
833}
834
/* Reset the HCI device identified by index @dev: drop pending traffic,
 * flush the inquiry cache and connection hash, and (unless the device
 * is in raw mode) issue an HCI Reset request.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset when the device is not up. */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the command credit and clear the per-link-type
	 * outstanding packet counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* Only send the actual HCI Reset command when not in raw mode. */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
873
874int hci_dev_reset_stat(__u16 dev)
875{
876 struct hci_dev *hdev;
877 int ret = 0;
878
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200879 hdev = hci_dev_get(dev);
880 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881 return -ENODEV;
882
883 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
884
885 hci_dev_put(hdev);
886
887 return ret;
888}
889
/* Handle the HCISET* device-control ioctls coming from userspace.
 * @cmd: one of the HCISET* ioctl numbers.
 * @arg: user pointer to a struct hci_dev_req selecting the device
 *       (dev_id) and carrying the option value (dev_opt).
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are taken from dev_opt. */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: the first __u16 in
		 * memory is the packet count, the second is the MTU. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU, for SCO links. */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
964
/* Copy the list of registered HCI devices to userspace.
 * @arg: user pointer to a struct hci_dev_list_req; the caller supplies
 *       dev_num (maximum entries) and gets back dev_num actual entries
 *       in dev_req[] (id + flags per device).
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation below to roughly two pages. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* NOTE(review): listing devices has side effects -- a
		 * pending auto power-off is cancelled here, mirroring
		 * hci_get_dev_info(). */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not under mgmt control get marked pairable. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of entries actually filled. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1011
/* Copy information about one HCI device to userspace.
 * @arg: user pointer to a struct hci_dev_info whose dev_id selects
 *       the device; the struct is filled in and copied back.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* NOTE(review): querying a device cancels its pending auto
	 * power-off, same side effect as in hci_get_dev_list(). */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not under mgmt control get marked pairable. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: device type. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1053
1054/* ---- Interface to HCI drivers ---- */
1055
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001056static int hci_rfkill_set_block(void *data, bool blocked)
1057{
1058 struct hci_dev *hdev = data;
1059
1060 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1061
1062 if (!blocked)
1063 return 0;
1064
1065 hci_dev_do_close(hdev);
1066
1067 return 0;
1068}
1069
/* rfkill operations: only blocking is handled (see
 * hci_rfkill_set_block() above). */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1073
/* Alloc HCI device */
/* Allocate and minimally initialize a struct hci_dev.
 * All fields start zeroed (kzalloc); sysfs state and the driver_init
 * queue are set up here. Returns NULL on allocation failure.
 * The returned device is released via hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1089
/* Free HCI device */
/* Release a device obtained from hci_alloc_dev(). The memory itself
 * is freed by the device-model release callback once the last
 * reference is dropped. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1099
/* Work item: power on the device. If HCI_AUTO_OFF is set, schedule a
 * delayed power-off; if this completed the setup phase (HCI_SETUP),
 * announce the controller index to mgmt. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	/* If the device cannot be opened there is nothing more to do. */
	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1116
/* Delayed work item: auto power-off. Clears HCI_AUTO_OFF and closes
 * the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}
1128
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001129static void hci_discov_off(struct work_struct *work)
1130{
1131 struct hci_dev *hdev;
1132 u8 scan = SCAN_PAGE;
1133
1134 hdev = container_of(work, struct hci_dev, discov_off.work);
1135
1136 BT_DBG("%s", hdev->name);
1137
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001138 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001139
1140 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1141
1142 hdev->discov_timeout = 0;
1143
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001144 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001145}
1146
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001147int hci_uuids_clear(struct hci_dev *hdev)
1148{
1149 struct list_head *p, *n;
1150
1151 list_for_each_safe(p, n, &hdev->uuids) {
1152 struct bt_uuid *uuid;
1153
1154 uuid = list_entry(p, struct bt_uuid, list);
1155
1156 list_del(p);
1157 kfree(uuid);
1158 }
1159
1160 return 0;
1161}
1162
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001163int hci_link_keys_clear(struct hci_dev *hdev)
1164{
1165 struct list_head *p, *n;
1166
1167 list_for_each_safe(p, n, &hdev->link_keys) {
1168 struct link_key *key;
1169
1170 key = list_entry(p, struct link_key, list);
1171
1172 list_del(p);
1173 kfree(key);
1174 }
1175
1176 return 0;
1177}
1178
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001179int hci_smp_ltks_clear(struct hci_dev *hdev)
1180{
1181 struct smp_ltk *k, *tmp;
1182
1183 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1184 list_del(&k->list);
1185 kfree(k);
1186 }
1187
1188 return 0;
1189}
1190
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001191struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1192{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001193 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001194
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001195 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001196 if (bacmp(bdaddr, &k->bdaddr) == 0)
1197 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001198
1199 return NULL;
1200}
1201
/* Decide whether a link key should be stored persistently.
 * @conn may be NULL (security mode 3 / no connection context).
 * Returns 1 when the key should be kept, 0 when it must be dropped
 * after use. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1237
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001238struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001239{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001240 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001241
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001242 list_for_each_entry(k, &hdev->long_term_keys, list) {
1243 if (k->ediv != ediv ||
1244 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001245 continue;
1246
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001247 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001248 }
1249
1250 return NULL;
1251}
1252EXPORT_SYMBOL(hci_find_ltk);
1253
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001254struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1255 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001256{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001257 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001258
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001259 list_for_each_entry(k, &hdev->long_term_keys, list)
1260 if (addr_type == k->bdaddr_type &&
1261 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001262 return k;
1263
1264 return NULL;
1265}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001266EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001267
/* Store (or update) the link key for @bdaddr.
 * @conn may be NULL. @new_key: when zero, the key is only stored and
 * no mgmt notification or persistence check happens.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys were only reported to mgmt; drop them
	 * from the list again. */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1322
/* Store (or update) an SMP short term / long term key for @bdaddr.
 * Keys whose @type carries neither HCI_SMP_STK nor HCI_SMP_LTK are
 * silently ignored. When @new_key is non-zero and the type includes
 * HCI_SMP_LTK, mgmt is notified. Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address/type if present. */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only LTKs (not STKs) are announced to mgmt. */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1359
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001360int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1361{
1362 struct link_key *key;
1363
1364 key = hci_find_link_key(hdev, bdaddr);
1365 if (!key)
1366 return -ENOENT;
1367
1368 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1369
1370 list_del(&key->list);
1371 kfree(key);
1372
1373 return 0;
1374}
1375
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001376int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1377{
1378 struct smp_ltk *k, *tmp;
1379
1380 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1381 if (bacmp(bdaddr, &k->bdaddr))
1382 continue;
1383
1384 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1385
1386 list_del(&k->list);
1387 kfree(k);
1388 }
1389
1390 return 0;
1391}
1392
/* HCI command timer function */
/* Fires when the controller failed to answer the last command in
 * time: restore the command credit and re-kick the command work so
 * the queue can make progress again. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1402
Szymon Janc2763eda2011-03-22 13:12:22 +01001403struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1404 bdaddr_t *bdaddr)
1405{
1406 struct oob_data *data;
1407
1408 list_for_each_entry(data, &hdev->remote_oob_data, list)
1409 if (bacmp(bdaddr, &data->bdaddr) == 0)
1410 return data;
1411
1412 return NULL;
1413}
1414
1415int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1416{
1417 struct oob_data *data;
1418
1419 data = hci_find_remote_oob_data(hdev, bdaddr);
1420 if (!data)
1421 return -ENOENT;
1422
1423 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1424
1425 list_del(&data->list);
1426 kfree(data);
1427
1428 return 0;
1429}
1430
1431int hci_remote_oob_data_clear(struct hci_dev *hdev)
1432{
1433 struct oob_data *data, *n;
1434
1435 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1436 list_del(&data->list);
1437 kfree(data);
1438 }
1439
1440 return 0;
1441}
1442
1443int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1444 u8 *randomizer)
1445{
1446 struct oob_data *data;
1447
1448 data = hci_find_remote_oob_data(hdev, bdaddr);
1449
1450 if (!data) {
1451 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1452 if (!data)
1453 return -ENOMEM;
1454
1455 bacpy(&data->bdaddr, bdaddr);
1456 list_add(&data->list, &hdev->remote_oob_data);
1457 }
1458
1459 memcpy(data->hash, hash, sizeof(data->hash));
1460 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1461
1462 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1463
1464 return 0;
1465}
1466
Antti Julkub2a66aa2011-06-15 12:01:14 +03001467struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1468 bdaddr_t *bdaddr)
1469{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001470 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001471
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001472 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001473 if (bacmp(bdaddr, &b->bdaddr) == 0)
1474 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001475
1476 return NULL;
1477}
1478
1479int hci_blacklist_clear(struct hci_dev *hdev)
1480{
1481 struct list_head *p, *n;
1482
1483 list_for_each_safe(p, n, &hdev->blacklist) {
1484 struct bdaddr_list *b;
1485
1486 b = list_entry(p, struct bdaddr_list, list);
1487
1488 list_del(p);
1489 kfree(b);
1490 }
1491
1492 return 0;
1493}
1494
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001495int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001496{
1497 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001498
1499 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1500 return -EBADF;
1501
Antti Julku5e762442011-08-25 16:48:02 +03001502 if (hci_blacklist_lookup(hdev, bdaddr))
1503 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001504
1505 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001506 if (!entry)
1507 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001508
1509 bacpy(&entry->bdaddr, bdaddr);
1510
1511 list_add(&entry->list, &hdev->blacklist);
1512
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001513 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001514}
1515
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001516int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001517{
1518 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001519
Szymon Janc1ec918c2011-11-16 09:32:21 +01001520 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001521 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001522
1523 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001524 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001525 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001526
1527 list_del(&entry->list);
1528 kfree(entry);
1529
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001530 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001531}
1532
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001533static void hci_clear_adv_cache(struct work_struct *work)
Andre Guedes35815082011-05-26 16:23:53 -03001534{
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001535 struct hci_dev *hdev = container_of(work, struct hci_dev,
1536 adv_work.work);
Andre Guedes35815082011-05-26 16:23:53 -03001537
1538 hci_dev_lock(hdev);
1539
1540 hci_adv_entries_clear(hdev);
1541
1542 hci_dev_unlock(hdev);
1543}
1544
Andre Guedes76c86862011-05-26 16:23:50 -03001545int hci_adv_entries_clear(struct hci_dev *hdev)
1546{
1547 struct adv_entry *entry, *tmp;
1548
1549 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1550 list_del(&entry->list);
1551 kfree(entry);
1552 }
1553
1554 BT_DBG("%s adv cache cleared", hdev->name);
1555
1556 return 0;
1557}
1558
1559struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1560{
1561 struct adv_entry *entry;
1562
1563 list_for_each_entry(entry, &hdev->adv_entries, list)
1564 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1565 return entry;
1566
1567 return NULL;
1568}
1569
1570static inline int is_connectable_adv(u8 evt_type)
1571{
1572 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1573 return 1;
1574
1575 return 0;
1576}
1577
/* Cache the address from a connectable LE advertising report.
 * Each address is stored at most once. Returns 0 on success (or when
 * the address was already cached), -EINVAL for non-connectable event
 * types, -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1605
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001606static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1607{
1608 struct le_scan_params *param = (struct le_scan_params *) opt;
1609 struct hci_cp_le_set_scan_param cp;
1610
1611 memset(&cp, 0, sizeof(cp));
1612 cp.type = param->type;
1613 cp.interval = cpu_to_le16(param->interval);
1614 cp.window = cpu_to_le16(param->window);
1615
1616 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1617}
1618
1619static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1620{
1621 struct hci_cp_le_set_scan_enable cp;
1622
1623 memset(&cp, 0, sizeof(cp));
1624 cp.enable = 1;
1625
1626 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1627}
1628
/* Start an LE scan synchronously: program the scan parameters, enable
 * scanning, and schedule the automatic disable after @timeout ms.
 * Returns 0 on success, -EINPROGRESS if scanning is already active,
 * or a negative errno from the HCI requests.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
				u16 window, int timeout)
{
	/* Timeout for each of the two HCI requests below -- distinct
	 * from @timeout, which is the scan duration. */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be set before scanning is enabled. */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
						timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arm the automatic scan stop. */
	schedule_delayed_work(&hdev->le_scan_disable,
						msecs_to_jiffies(timeout));

	return 0;
}
1662
1663static void le_scan_disable_work(struct work_struct *work)
1664{
1665 struct hci_dev *hdev = container_of(work, struct hci_dev,
1666 le_scan_disable.work);
1667 struct hci_cp_le_set_scan_enable cp;
1668
1669 BT_DBG("%s", hdev->name);
1670
1671 memset(&cp, 0, sizeof(cp));
1672
1673 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1674}
1675
Andre Guedes28b75a82012-02-03 17:48:00 -03001676static void le_scan_work(struct work_struct *work)
1677{
1678 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1679 struct le_scan_params *param = &hdev->le_scan_params;
1680
1681 BT_DBG("%s", hdev->name);
1682
1683 hci_do_le_scan(hdev, param->type, param->interval,
1684 param->window, param->timeout);
1685}
1686
/* Kick off an LE scan asynchronously: stash the parameters in
 * hdev->le_scan_params and queue le_scan_work() on system_long_wq.
 * Returns 0 on success or -EINPROGRESS if a scan work item is already
 * queued or running.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
				int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq: the scan work blocks on HCI requests. */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1706
/* Register HCI device */
/*
 * Assign the first free device id (AMP controllers never get id 0, so
 * the index doubles as the AMP controller ID), link the device into
 * hci_dev_list, initialize all per-device state and work items, create
 * the per-device workqueue and sysfs entry, and schedule the initial
 * power-on.
 *
 * Returns the assigned id on success or a negative errno; on failure
 * the device is unlinked from the list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A transport driver must provide at least open/close hooks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	/* The scan assumes the list is kept sorted by ascending id and
	 * stops at the first gap. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* NOTE(review): list_add_tail() links the new node *before* 'head'
	 * (the last id-consecutive entry), so after a second registration
	 * the list is no longer sorted by id, which the scan above relies
	 * on — verify; later kernels replaced this with IDA-based id
	 * allocation. */
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default controller state and packet/link policy */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* RX/CMD/TX processing runs from these work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands that never get a response */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Dedicated single-threaded queue for this device's work items */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional; registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Mark the device as still being set up and kick off power-on */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion performed above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1838
/* Unregister HCI device */
/*
 * Tear down a registered device: unlink it from hci_dev_list, close it,
 * free reassembly buffers, notify the management interface (unless the
 * device never finished setup), remove rfkill and sysfs entries, flush
 * pending work, clear persistent per-device data and drop the reference
 * taken at registration time.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce the removal to mgmt if the device had completed
	 * its setup (i.e. was ever visible to userspace) */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Make sure the advertising-cache work is gone before the
	 * workqueue is destroyed */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Clear all persistent per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1891
/* Suspend HCI device */
/* Broadcast the suspend event to registered notifier listeners; no
 * device state is changed here.  Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1899
/* Resume HCI device */
/* Broadcast the resume event to registered notifier listeners; no
 * device state is changed here.  Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1907
Marcel Holtmann76bca882009-11-18 00:40:39 +01001908/* Receive frame from HCI drivers */
1909int hci_recv_frame(struct sk_buff *skb)
1910{
1911 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1912 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1913 && !test_bit(HCI_INIT, &hdev->flags))) {
1914 kfree_skb(skb);
1915 return -ENXIO;
1916 }
1917
1918 /* Incomming skb */
1919 bt_cb(skb)->incoming = 1;
1920
1921 /* Time stamp */
1922 __net_timestamp(skb);
1923
Marcel Holtmann76bca882009-11-18 00:40:39 +01001924 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001925 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001926
Marcel Holtmann76bca882009-11-18 00:40:39 +01001927 return 0;
1928}
1929EXPORT_SYMBOL(hci_recv_frame);
1930
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301931static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001932 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301933{
1934 int len = 0;
1935 int hlen = 0;
1936 int remain = count;
1937 struct sk_buff *skb;
1938 struct bt_skb_cb *scb;
1939
1940 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1941 index >= NUM_REASSEMBLY)
1942 return -EILSEQ;
1943
1944 skb = hdev->reassembly[index];
1945
1946 if (!skb) {
1947 switch (type) {
1948 case HCI_ACLDATA_PKT:
1949 len = HCI_MAX_FRAME_SIZE;
1950 hlen = HCI_ACL_HDR_SIZE;
1951 break;
1952 case HCI_EVENT_PKT:
1953 len = HCI_MAX_EVENT_SIZE;
1954 hlen = HCI_EVENT_HDR_SIZE;
1955 break;
1956 case HCI_SCODATA_PKT:
1957 len = HCI_MAX_SCO_SIZE;
1958 hlen = HCI_SCO_HDR_SIZE;
1959 break;
1960 }
1961
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001962 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301963 if (!skb)
1964 return -ENOMEM;
1965
1966 scb = (void *) skb->cb;
1967 scb->expect = hlen;
1968 scb->pkt_type = type;
1969
1970 skb->dev = (void *) hdev;
1971 hdev->reassembly[index] = skb;
1972 }
1973
1974 while (count) {
1975 scb = (void *) skb->cb;
1976 len = min(scb->expect, (__u16)count);
1977
1978 memcpy(skb_put(skb, len), data, len);
1979
1980 count -= len;
1981 data += len;
1982 scb->expect -= len;
1983 remain = count;
1984
1985 switch (type) {
1986 case HCI_EVENT_PKT:
1987 if (skb->len == HCI_EVENT_HDR_SIZE) {
1988 struct hci_event_hdr *h = hci_event_hdr(skb);
1989 scb->expect = h->plen;
1990
1991 if (skb_tailroom(skb) < scb->expect) {
1992 kfree_skb(skb);
1993 hdev->reassembly[index] = NULL;
1994 return -ENOMEM;
1995 }
1996 }
1997 break;
1998
1999 case HCI_ACLDATA_PKT:
2000 if (skb->len == HCI_ACL_HDR_SIZE) {
2001 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2002 scb->expect = __le16_to_cpu(h->dlen);
2003
2004 if (skb_tailroom(skb) < scb->expect) {
2005 kfree_skb(skb);
2006 hdev->reassembly[index] = NULL;
2007 return -ENOMEM;
2008 }
2009 }
2010 break;
2011
2012 case HCI_SCODATA_PKT:
2013 if (skb->len == HCI_SCO_HDR_SIZE) {
2014 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2015 scb->expect = h->dlen;
2016
2017 if (skb_tailroom(skb) < scb->expect) {
2018 kfree_skb(skb);
2019 hdev->reassembly[index] = NULL;
2020 return -ENOMEM;
2021 }
2022 }
2023 break;
2024 }
2025
2026 if (scb->expect == 0) {
2027 /* Complete frame */
2028
2029 bt_cb(skb)->pkt_type = type;
2030 hci_recv_frame(skb);
2031
2032 hdev->reassembly[index] = NULL;
2033 return remain;
2034 }
2035 }
2036
2037 return remain;
2038}
2039
Marcel Holtmannef222012007-07-11 06:42:04 +02002040int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2041{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302042 int rem = 0;
2043
Marcel Holtmannef222012007-07-11 06:42:04 +02002044 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2045 return -EILSEQ;
2046
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002047 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002048 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302049 if (rem < 0)
2050 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002051
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302052 data += (count - rem);
2053 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002054 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002055
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302056 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002057}
2058EXPORT_SYMBOL(hci_recv_fragment);
2059
/* Slot used when the packet type is carried in-band in the stream */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (type byte followed by packet bytes, e.g. a
 * UART transport) into the stream reassembly slot.
 *
 * When no packet is in progress, the first byte of the input is the
 * packet type indicator and is consumed here; otherwise the type of
 * the in-progress packet is reused.
 *
 * Returns the number of leftover bytes (>= 0) or a negative errno
 * propagated from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Skip past the bytes hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2094
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095/* ---- Interface to upper protocols ---- */
2096
/* Register an upper-protocol callback structure so it receives HCI
 * connection events.  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2108
/* Remove a previously registered upper-protocol callback structure.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2120
/* Hand one outgoing frame to the transport driver, mirroring it to raw
 * HCI sockets first when promiscuous listeners are attached.
 * Takes ownership of the skb; returns the driver's send result or
 * -ENODEV when the skb carries no device pointer. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Copy to monitoring sockets before the driver sees it */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2144
/* Send HCI command */
/*
 * Build an HCI command packet (header + optional parameters) and queue
 * it on cmd_q; actual transmission happens from the command work item,
 * which enforces the controller's outstanding-command window.
 *
 * @opcode: 16-bit OGF/OCF opcode, host byte order
 * @plen:   length of @param in bytes (may be 0)
 * @param:  command parameters, copied into the packet
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Wire format: little-endian opcode followed by parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
2181/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002182void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183{
2184 struct hci_command_hdr *hdr;
2185
2186 if (!hdev->sent_cmd)
2187 return NULL;
2188
2189 hdr = (void *) hdev->sent_cmd->data;
2190
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002191 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 return NULL;
2193
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002194 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195
2196 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2197}
2198
2199/* Send ACL data */
2200static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2201{
2202 struct hci_acl_hdr *hdr;
2203 int len = skb->len;
2204
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002205 skb_push(skb, HCI_ACL_HDR_SIZE);
2206 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002207 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002208 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2209 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210}
2211
/* Queue an ACL packet (possibly pre-fragmented via frag_list) on the
 * given channel queue.  The first fragment keeps the caller's flags;
 * continuation fragments are re-flagged ACL_CONT and get their own ACL
 * headers here.  All fragments are queued atomically so the scheduler
 * never sees a partial packet. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain from the head skb */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must not carry the start flag */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2252
/* Send an ACL data packet on a channel: tag it, add the ACL header for
 * the channel's connection handle, queue it (handling fragmentation)
 * and kick the TX work item. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	/* Actual transmission happens from the TX work */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2269
/* Send SCO data */
/* Build the SCO header (little-endian handle, payload length), prepend
 * it to the skb, queue the packet on the connection's data queue and
 * kick the TX work item. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Copy the prepared header into the space pushed in front of
	 * the payload */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2292
2293/* ---- HCI TX task (outgoing data) ---- */
2294
/* HCI Connection scheduler */
/*
 * Pick the connection of the given link type that has queued data and
 * the fewest packets currently in flight (c->sent), for fair
 * round-robin scheduling.  *quote is set to this connection's share of
 * the free controller buffers (total credits divided by the number of
 * ready connections, minimum 1), or 0 when nothing is ready.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only established (or configuring) links may transmit */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest in-flight packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Credits available depend on the link type; LE falls
		 * back to the ACL pool when it has no dedicated buffers */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2354
/* Link TX watchdog: called when the controller stopped returning
 * buffer credits for too long.  Disconnects every connection of the
 * given type that still has unacknowledged packets. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: "Remote User Terminated Connection" */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2375
/* Priority-aware channel scheduler.
 *
 * Scan every ready connection of the given link type and pick a channel
 * whose head packet has the highest priority seen so far; among
 * channels at that priority, prefer the connection with the fewest
 * in-flight packets (conn->sent).  Whenever a higher priority appears,
 * the fairness counters restart for that priority level.
 *
 * *quote receives the chosen channel's share of the free controller
 * buffers (credits divided by the number of competing channels at the
 * winning priority, minimum 1).  Returns the chosen channel or NULL
 * when nothing is ready.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the packet at the head of the queue decides
			 * this channel's current priority */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New best priority: restart fairness
				 * bookkeeping at this level */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Credits available depend on the link type; LE falls back to
	 * the ACL pool when it has no dedicated buffers */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2454
/* Anti-starvation pass run after a TX round.  For every ready
 * connection of the given type: channels that transmitted this round
 * get their per-round counter reset, while channels that were starved
 * (sent nothing but still have queued data) get their head packet
 * promoted towards HCI_PRIO_MAX - 1 so they win a later round. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2504
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002505static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2506{
2507 /* Calculate count of blocks used by this packet */
2508 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2509}
2510
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002511static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 if (!test_bit(HCI_RAW, &hdev->flags)) {
2514 /* ACL tx timeout must be longer than maximum
2515 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002516 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenkocc48dc02012-01-04 16:42:26 +02002517 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002518 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002520}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521
/* Packet-based ACL TX scheduler: while controller credits remain, let
 * hci_chan_sent() pick a channel and send up to its quota of packets
 * from that channel, stopping a burst early if a lower-priority packet
 * reaches the head of the queue.  Runs the anti-starvation pass when
 * anything was actually sent. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Now actually take the packet off the queue */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller credit consumed; track per-channel
			 * and per-connection in-flight counts */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2559
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002560static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2561{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002562 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002563 struct hci_chan *chan;
2564 struct sk_buff *skb;
2565 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002566
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002567 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002568
2569 while (hdev->block_cnt > 0 &&
2570 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2571 u32 priority = (skb_peek(&chan->data_q))->priority;
2572 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2573 int blocks;
2574
2575 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2576 skb->len, skb->priority);
2577
2578 /* Stop if priority has changed */
2579 if (skb->priority < priority)
2580 break;
2581
2582 skb = skb_dequeue(&chan->data_q);
2583
2584 blocks = __get_blocks(hdev, skb);
2585 if (blocks > hdev->block_cnt)
2586 return;
2587
2588 hci_conn_enter_active_mode(chan->conn,
2589 bt_cb(skb)->force_active);
2590
2591 hci_send_frame(skb);
2592 hdev->acl_last_tx = jiffies;
2593
2594 hdev->block_cnt -= blocks;
2595 quote -= blocks;
2596
2597 chan->sent += blocks;
2598 chan->conn->sent += blocks;
2599 }
2600 }
2601
2602 if (cnt != hdev->block_cnt)
2603 hci_prio_recalculate(hdev, ACL_LINK);
2604}
2605
2606static inline void hci_sched_acl(struct hci_dev *hdev)
2607{
2608 BT_DBG("%s", hdev->name);
2609
2610 if (!hci_conn_num(hdev, ACL_LINK))
2611 return;
2612
2613 switch (hdev->flow_ctl_mode) {
2614 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2615 hci_sched_acl_pkt(hdev);
2616 break;
2617
2618 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2619 hci_sched_acl_blk(hdev);
2620 break;
2621 }
2622}
2623
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624/* Schedule SCO */
2625static inline void hci_sched_sco(struct hci_dev *hdev)
2626{
2627 struct hci_conn *conn;
2628 struct sk_buff *skb;
2629 int quote;
2630
2631 BT_DBG("%s", hdev->name);
2632
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002633 if (!hci_conn_num(hdev, SCO_LINK))
2634 return;
2635
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2637 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2638 BT_DBG("skb %p len %d", skb, skb->len);
2639 hci_send_frame(skb);
2640
2641 conn->sent++;
2642 if (conn->sent == ~0)
2643 conn->sent = 0;
2644 }
2645 }
2646}
2647
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002648static inline void hci_sched_esco(struct hci_dev *hdev)
2649{
2650 struct hci_conn *conn;
2651 struct sk_buff *skb;
2652 int quote;
2653
2654 BT_DBG("%s", hdev->name);
2655
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002656 if (!hci_conn_num(hdev, ESCO_LINK))
2657 return;
2658
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002659 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2660 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2661 BT_DBG("skb %p len %d", skb, skb->len);
2662 hci_send_frame(skb);
2663
2664 conn->sent++;
2665 if (conn->sent == ~0)
2666 conn->sent = 0;
2667 }
2668 }
2669}
2670
/* Schedule LE data transmission.
 *
 * Walks LE channels and sends queued frames while transmit credits
 * remain.  When the controller reports no dedicated LE buffers
 * (le_pkts == 0), LE traffic borrows from the ACL credit pool
 * (acl_cnt) instead, and the leftover credits are written back to
 * whichever pool they came from.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		/* NOTE(review): hardcoded HZ * 45 here, while the ACL path
		 * uses msecs_to_jiffies(HCI_ACL_TX_TIMEOUT) — a shared named
		 * constant would keep the two in sync. */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE pool if present, otherwise share ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* starting credit count, to detect activity below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return unused credits to the pool they were taken from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2721
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002722static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002724 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 struct sk_buff *skb;
2726
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002727 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2728 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729
2730 /* Schedule queues and send stuff to HCI driver */
2731
2732 hci_sched_acl(hdev);
2733
2734 hci_sched_sco(hdev);
2735
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002736 hci_sched_esco(hdev);
2737
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002738 hci_sched_le(hdev);
2739
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 /* Send next queued raw (unknown type) packet */
2741 while ((skb = skb_dequeue(&hdev->raw_q)))
2742 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743}
2744
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002745/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746
2747/* ACL data packet */
2748static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2749{
2750 struct hci_acl_hdr *hdr = (void *) skb->data;
2751 struct hci_conn *conn;
2752 __u16 handle, flags;
2753
2754 skb_pull(skb, HCI_ACL_HDR_SIZE);
2755
2756 handle = __le16_to_cpu(hdr->handle);
2757 flags = hci_flags(handle);
2758 handle = hci_handle(handle);
2759
2760 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2761
2762 hdev->stat.acl_rx++;
2763
2764 hci_dev_lock(hdev);
2765 conn = hci_conn_hash_lookup_handle(hdev, handle);
2766 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002767
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002769 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002770
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002772 l2cap_recv_acldata(conn, skb, flags);
2773 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002775 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 hdev->name, handle);
2777 }
2778
2779 kfree_skb(skb);
2780}
2781
2782/* SCO data packet */
2783static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2784{
2785 struct hci_sco_hdr *hdr = (void *) skb->data;
2786 struct hci_conn *conn;
2787 __u16 handle;
2788
2789 skb_pull(skb, HCI_SCO_HDR_SIZE);
2790
2791 handle = __le16_to_cpu(hdr->handle);
2792
2793 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2794
2795 hdev->stat.sco_rx++;
2796
2797 hci_dev_lock(hdev);
2798 conn = hci_conn_hash_lookup_handle(hdev, handle);
2799 hci_dev_unlock(hdev);
2800
2801 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002803 sco_recv_scodata(conn, skb);
2804 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002806 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807 hdev->name, handle);
2808 }
2809
2810 kfree_skb(skb);
2811}
2812
/* RX work item: drain hdev->rx_q and dispatch each frame by packet
 * type.  Every skb is consumed on all paths (handler or kfree_skb).
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode the stack itself does no processing; the
		 * socket copy above is all that happens */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states:
			 * during init, ACL/SCO data is dropped and only
			 * events fall through to the dispatch below */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame; each handler consumes the skb */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type */
			kfree_skb(skb);
			break;
		}
	}
}
2864
/* Command work item: send the next queued HCI command when the
 * controller has a free command slot (cmd_cnt > 0).
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command; a clone of the new one
		 * is kept in sent_cmd for the completion path */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command watchdog while a reset is pending;
			 * otherwise (re)arm the command timeout */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry
			 * from a fresh work invocation */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002895
2896int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2897{
2898 /* General inquiry access code (GIAC) */
2899 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2900 struct hci_cp_inquiry cp;
2901
2902 BT_DBG("%s", hdev->name);
2903
2904 if (test_bit(HCI_INQUIRY, &hdev->flags))
2905 return -EINPROGRESS;
2906
Johan Hedberg46632622012-01-02 16:06:08 +02002907 inquiry_cache_flush(hdev);
2908
Andre Guedes2519a1f2011-11-07 11:45:24 -03002909 memset(&cp, 0, sizeof(cp));
2910 memcpy(&cp.lap, lap, sizeof(cp.lap));
2911 cp.length = length;
2912
2913 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2914}
Andre Guedes023d50492011-11-04 14:16:52 -03002915
2916int hci_cancel_inquiry(struct hci_dev *hdev)
2917{
2918 BT_DBG("%s", hdev->name);
2919
2920 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2921 return -EPERM;
2922
2923 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2924}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002925
/* Module parameter: enables High Speed support at load time; mode 0644
 * makes it root-writable via /sys/module at runtime. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");