blob: feeea4df25296ba7f0d7acbeb0e1c31dd3ac3de8 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020058int enable_hs;
59
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
Linus Torvalds1da177e2005-04-16 15:20:36 -070072/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080073static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
/* Register a notifier block to receive HCI core events (device
 * add/remove/up/down, delivered via hci_notify()). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove a previously registered HCI event notifier. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast an HCI_DEV_* event for @hdev to all registered notifiers.
 * Atomic chain: callbacks must not sleep. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
/* Called from the event path when command @cmd finished with @result.
 * Completes the synchronous request a __hci_request() caller is
 * sleeping on, waking it via req_wait_q. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
110
/* Abort a pending synchronous request with error @err (a positive
 * errno; __hci_request() negates req_result for HCI_REQ_CANCELED). */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
121
122/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which issues one or more HCI commands) and sleeps up to
 * @timeout jiffies until hci_req_complete()/hci_req_cancel() wakes us.
 * Caller must hold the request lock (see hci_request()).  Returns 0 on
 * success, a negative errno on failure/timeout, -EINTR on signal.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue on the wait queue and set state *before* issuing the
	 * request so an immediate completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result is a controller status code; map to errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno from hci_req_cancel() */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
/* Serialized wrapper around __hci_request(): takes the per-device
 * request lock and refuses requests while the device is down. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
180
/* Request callback: issue an HCI Reset.  HCI_RESET is set so the
 * event path knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Queue the init command sequence for a BR/EDR controller.  Runs in
 * request context from hci_init_req(); completion of the last command
 * finishes the init request (see hci_req_complete()). */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that quirk it away) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all link keys stored on the controller; the host
	 * (mgmt/userspace) owns key storage. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Queue the (much shorter) init command sequence for an AMP
 * controller: reset plus local version read. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback for device init: flush any driver-queued special
 * commands to the command queue, then run the type-specific init. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands the driver queued before the device came up */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
/* Request callback for LE-specific init, run after the main init on
 * LE-capable hosts (see hci_dev_open()). */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
/* Request callback: set scan enable; @opt carries the scan-enable
 * bitmask (inquiry/page scan) from the HCISETSCAN ioctl. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
304
/* Request callback: write authentication enable; @opt carries the
 * value from the HCISETAUTH ioctl. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
314
/* Request callback: write encryption mode; @opt carries the value
 * from the HCISETENCRYPT ioctl. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
324
/* Request callback: write the default link policy; @opt carries the
 * 16-bit policy from the HCISETLINKPOL ioctl. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
334
/* Get HCI device by index.
 * Device is held on return; caller must drop the reference with
 * hci_dev_put().  Returns NULL for a negative or unknown index. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Hold the list lock only while scanning; the returned hdev is
	 * kept alive by the reference taken inside the lock. */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
358static void inquiry_cache_flush(struct hci_dev *hdev)
359{
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200360 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200362 list_for_each_entry_safe(p, n, &hdev->inq_cache.list, list) {
363 list_del(&p->list);
364 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365 }
366}
367
368struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
369{
370 struct inquiry_cache *cache = &hdev->inq_cache;
371 struct inquiry_entry *e;
372
373 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
374
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200375 list_for_each_entry(e, &cache->list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700376 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200377 return e;
378 }
379
380 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381}
382
/* Insert or refresh the cache entry for @data->bdaddr with fresh
 * inquiry data and timestamps.  Caller must hold the device lock;
 * allocation is GFP_ATOMIC since this runs from the event path.
 * Allocation failure is silently ignored (cache is best-effort). */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		list_add(&ie->list, &cache->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
404
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info.  Returns the number of entries copied.
 * Caller must hold the device lock and size @buf accordingly. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->list, list) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
432
/* Request callback: start an inquiry with the parameters passed via
 * @opt (a struct hci_inquiry_req *).  No-op if an inquiry is already
 * in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
449
/* HCIINQUIRY ioctl handler: optionally run a new inquiry (if the
 * cache is stale/empty or a flush was requested), then copy the
 * cached results back to user space after the updated request
 * structure.  Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; 2000ms per unit gives headroom */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the request header (with the real num_rsp) followed
	 * by the result array. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
515
516/* ---- HCI ioctl helpers ---- */
517
/* Bring the HCI device with index @dev up: open the driver, run the
 * controller init sequence (unless the device is raw), and announce
 * HCI_DEV_UP.  On init failure the device is fully torn down again.
 * Returns 0 or a negative errno (-ERFKILL, -EALREADY, -EIO, ...). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power up a radio that rfkill has blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Extra reference while the device is up; dropped in
		 * hci_dev_do_close(). */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
604
/* Bring the device down: cancel pending work and timers, flush all
 * queues, reset the controller (unless raw), close the driver and
 * drop the reference taken in hci_dev_open().  The teardown order is
 * load-bearing — do not reorder. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down; just stop the command timer */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drops the reference held while the device was up */
	hci_dev_put(hdev);
	return 0;
}
684
685int hci_dev_close(__u16 dev)
686{
687 struct hci_dev *hdev;
688 int err;
689
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200690 hdev = hci_dev_get(dev);
691 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 return -ENODEV;
693 err = hci_dev_do_close(hdev);
694 hci_dev_put(hdev);
695 return err;
696}
697
/* HCIDEVRESET ioctl handler: flush queues, connections and inquiry
 * cache, reset packet counters and (unless raw) issue an HCI Reset.
 * Succeeds trivially (ret = 0) if the device is not up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* One free command slot; all per-link packet credits cleared */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
736
737int hci_dev_reset_stat(__u16 dev)
738{
739 struct hci_dev *hdev;
740 int ret = 0;
741
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200742 hdev = hci_dev_get(dev);
743 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744 return -ENODEV;
745
746 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
747
748 hci_dev_put(hdev);
749
750 return ret;
751}
752
/* Dispatch the HCISET* ioctls: copy the request from user space, look
 * up the target device, and either run an HCI request (auth, encrypt,
 * scan, link policy) or update in-kernel settings directly (link
 * mode, packet type, ACL/SCO mtu).  Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs pkts in the low and mtu in the high 16 bits */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
827
/* Handle the HCIGETDEVLIST ioctl: copy a snapshot of all registered
 * controllers (dev_id + flags) into the userspace buffer at @arg.
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace touched the device: cancel any pending
		 * automatic power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
874
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * device id supplied in the userspace buffer at @arg and copy it back.
 * Returns 0 on success, -EFAULT or -ENODEV on failure.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: cancel any pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
916
917/* ---- Interface to HCI drivers ---- */
918
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200919static int hci_rfkill_set_block(void *data, bool blocked)
920{
921 struct hci_dev *hdev = data;
922
923 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
924
925 if (!blocked)
926 return 0;
927
928 hci_dev_do_close(hdev);
929
930 return 0;
931}
932
/* rfkill integration: only the block/unblock transition is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
936
/* Alloc HCI device.
 * Returns a zero-initialized struct hci_dev (sysfs pre-initialized,
 * driver_init queue ready) or NULL on allocation failure.  The caller
 * releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	/* Zeroed so every flag, counter and list starts in a known state */
	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
952
/* Free HCI device allocated with hci_alloc_dev() */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver-init frames still queued */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
962
/* Work item: bring the controller up after registration or a mgmt
 * power-on request.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-off devices get powered down again unless userspace
	 * claims them within AUTO_OFF_TIMEOUT */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on finishes setup: announce the index */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
979
/* Delayed work item: automatic power-off fired because userspace never
 * claimed the device after hci_power_on().
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
991
/* Delayed work item: the discoverable timeout expired; fall back to
 * page scan only (still connectable, no longer discoverable).
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	/* Timeout consumed; zero means "no timer pending" */
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1009
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001010int hci_uuids_clear(struct hci_dev *hdev)
1011{
1012 struct list_head *p, *n;
1013
1014 list_for_each_safe(p, n, &hdev->uuids) {
1015 struct bt_uuid *uuid;
1016
1017 uuid = list_entry(p, struct bt_uuid, list);
1018
1019 list_del(p);
1020 kfree(uuid);
1021 }
1022
1023 return 0;
1024}
1025
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001026int hci_link_keys_clear(struct hci_dev *hdev)
1027{
1028 struct list_head *p, *n;
1029
1030 list_for_each_safe(p, n, &hdev->link_keys) {
1031 struct link_key *key;
1032
1033 key = list_entry(p, struct link_key, list);
1034
1035 list_del(p);
1036 kfree(key);
1037 }
1038
1039 return 0;
1040}
1041
1042struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1043{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001044 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001045
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001046 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001047 if (bacmp(bdaddr, &k->bdaddr) == 0)
1048 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001049
1050 return NULL;
1051}
1052
/* Decide whether a link key generated during pairing should be stored
 * persistently.  Returns 1 if the key should be kept, 0 if it should
 * be discarded after the mgmt notification.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1088
/* Look up a stored SMP Long Term Key by its encrypted diversifier and
 * 8-byte random value.  Returns the matching entry or NULL.
 */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		/* The master id lives in the key's variable-length data */
		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1111
1112struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1113 bdaddr_t *bdaddr, u8 type)
1114{
1115 struct link_key *k;
1116
1117 list_for_each_entry(k, &hdev->link_keys, list)
1118 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1119 return k;
1120
1121 return NULL;
1122}
1123EXPORT_SYMBOL(hci_find_link_key_type);
1124
/* Store a BR/EDR link key for @bdaddr, reusing an existing entry when
 * one is present.  When @new_key is set, userspace is notified via mgmt
 * and keys judged non-persistent are dropped again after notification.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
			bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed-combination event keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys only exist long enough to be reported */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1179
/* Store an SMP Long Term Key for @bdaddr, reusing an existing LTK entry
 * when present.  When @new_key is set, the key is reported via mgmt.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Extra room for the master id stored in key->data */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;	/* pin_len doubles as the LTK key size */

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		/* NOTE(review): in hci_add_link_key() the third argument of
		 * mgmt_new_link_key() is the "persistent" flag; here
		 * old_key_type is passed instead — confirm this is intended. */
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1217
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001218int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1219{
1220 struct link_key *key;
1221
1222 key = hci_find_link_key(hdev, bdaddr);
1223 if (!key)
1224 return -ENOENT;
1225
1226 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1227
1228 list_del(&key->list);
1229 kfree(key);
1230
1231 return 0;
1232}
1233
/* HCI command timer function: the controller never answered the last
 * command, so reopen the command window and kick the cmd work queue to
 * avoid stalling forever.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1243
Szymon Janc2763eda2011-03-22 13:12:22 +01001244struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1245 bdaddr_t *bdaddr)
1246{
1247 struct oob_data *data;
1248
1249 list_for_each_entry(data, &hdev->remote_oob_data, list)
1250 if (bacmp(bdaddr, &data->bdaddr) == 0)
1251 return data;
1252
1253 return NULL;
1254}
1255
1256int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1257{
1258 struct oob_data *data;
1259
1260 data = hci_find_remote_oob_data(hdev, bdaddr);
1261 if (!data)
1262 return -ENOENT;
1263
1264 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1265
1266 list_del(&data->list);
1267 kfree(data);
1268
1269 return 0;
1270}
1271
1272int hci_remote_oob_data_clear(struct hci_dev *hdev)
1273{
1274 struct oob_data *data, *n;
1275
1276 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1277 list_del(&data->list);
1278 kfree(data);
1279 }
1280
1281 return 0;
1282}
1283
1284int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1285 u8 *randomizer)
1286{
1287 struct oob_data *data;
1288
1289 data = hci_find_remote_oob_data(hdev, bdaddr);
1290
1291 if (!data) {
1292 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1293 if (!data)
1294 return -ENOMEM;
1295
1296 bacpy(&data->bdaddr, bdaddr);
1297 list_add(&data->list, &hdev->remote_oob_data);
1298 }
1299
1300 memcpy(data->hash, hash, sizeof(data->hash));
1301 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1302
1303 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1304
1305 return 0;
1306}
1307
Antti Julkub2a66aa2011-06-15 12:01:14 +03001308struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1309 bdaddr_t *bdaddr)
1310{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001311 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001312
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001313 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001314 if (bacmp(bdaddr, &b->bdaddr) == 0)
1315 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001316
1317 return NULL;
1318}
1319
1320int hci_blacklist_clear(struct hci_dev *hdev)
1321{
1322 struct list_head *p, *n;
1323
1324 list_for_each_safe(p, n, &hdev->blacklist) {
1325 struct bdaddr_list *b;
1326
1327 b = list_entry(p, struct bdaddr_list, list);
1328
1329 list_del(p);
1330 kfree(b);
1331 }
1332
1333 return 0;
1334}
1335
/* Add @bdaddr to the connection blacklist.
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already blacklisted,
 * -ENOMEM on allocation failure, otherwise the mgmt notification
 * result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blacklisted */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1356
/* Remove @bdaddr from the blacklist; BDADDR_ANY clears the whole list.
 * Returns -ENOENT if the address was not blacklisted, otherwise the
 * mgmt notification result (or hci_blacklist_clear()'s result).
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1373
/* Delayed work item: drop all cached LE advertising entries. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1385
Andre Guedes76c86862011-05-26 16:23:50 -03001386int hci_adv_entries_clear(struct hci_dev *hdev)
1387{
1388 struct adv_entry *entry, *tmp;
1389
1390 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1391 list_del(&entry->list);
1392 kfree(entry);
1393 }
1394
1395 BT_DBG("%s adv cache cleared", hdev->name);
1396
1397 return 0;
1398}
1399
1400struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1401{
1402 struct adv_entry *entry;
1403
1404 list_for_each_entry(entry, &hdev->adv_entries, list)
1405 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1406 return entry;
1407
1408 return NULL;
1409}
1410
1411static inline int is_connectable_adv(u8 evt_type)
1412{
1413 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1414 return 1;
1415
1416 return 0;
1417}
1418
/* Cache an LE advertising report so a later connect attempt can tell
 * the address was recently seen advertising.
 * Returns 0 on success (including the already-cached case), -EINVAL for
 * non-connectable advertising, -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1446
/* Register HCI device.
 * Assigns the first free index (AMP controllers never get index 0),
 * initializes all per-device state, creates the workqueue and sysfs
 * entries and schedules the initial power-on.
 * Returns the assigned id on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must provide the mandatory callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert in id order, right after the last consecutive id */
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices start in setup with auto power-off armed */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1574
/* Unregister HCI device: reverse of hci_register_dev().  Tears down
 * work items, sysfs, rfkill and all per-device state, then drops the
 * registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Unlink from the global list first so no new users find us */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Tell mgmt the index is gone, unless it was never announced */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1626
/* Suspend HCI device: only notifies registered listeners. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1634
/* Resume HCI device: only notifies registered listeners. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1642
/* Receive frame from HCI drivers: timestamp the skb and queue it for
 * processing by the rx work item.
 * Returns 0 on success or -ENXIO if the device is not up/initializing
 * (the skb is consumed in every case).
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Hand off to the rx work item running on the device workqueue */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1665
/* Feed @count bytes of driver data into the per-type reassembly buffer
 * at @index; completed packets are handed to hci_recv_frame().
 *
 * Returns the number of bytes left unconsumed (>= 0) or a negative
 * errno: -EILSEQ for an invalid type/index, -ENOMEM when allocation
 * fails or the advertised payload exceeds the buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the largest frame this
		 * packet type can carry and expect its header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it to learn how many more bytes to expect. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1774
/* Feed @count bytes of a single known-type packet stream into the
 * per-type reassembly slot (slot index = type - 1).  Loops until all
 * input is consumed or hci_reassembly() reports an error.
 * Returns >= 0 (bytes left unconsumed) or a negative errno. */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past the bytes the reassembler consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1794
/* Dedicated reassembly slot for a self-describing byte stream where
 * each packet is prefixed by its one-byte HCI packet type. */
#define STREAM_REASSEMBLY 0

/* Feed a raw UART-style stream into reassembly.  When no packet is in
 * progress, the first byte names the packet type; otherwise the type is
 * taken from the partially built skb.  Returns bytes left unconsumed
 * (>= 0) or a negative errno from hci_reassembly(). */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1829
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure on the global
 * hci_cb_list (guarded by hci_cb_list_lock).  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1843
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list.  Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1855
/* Hand one outgoing frame to the driver's ->send hook.
 * Consumes @skb; frees it with -ENODEV when skb->dev is unset.
 * In promiscuous mode the frame is timestamped and copied to
 * monitoring sockets BEFORE being orphaned and sent. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1879
/* Send HCI command: build a command packet (@plen parameter bytes at
 * @param) and queue it for the cmd work item.
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command sent during init so the init sequence
	 * can continue from it on command complete. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
1916/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001917void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918{
1919 struct hci_command_hdr *hdr;
1920
1921 if (!hdev->sent_cmd)
1922 return NULL;
1923
1924 hdr = (void *) hdev->sent_cmd->data;
1925
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001926 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 return NULL;
1928
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001929 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
1931 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1932}
1933
1934/* Send ACL data */
1935static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1936{
1937 struct hci_acl_hdr *hdr;
1938 int len = skb->len;
1939
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001940 skb_push(skb, HCI_ACL_HDR_SIZE);
1941 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001942 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001943 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1944 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945}
1946
/* Queue one ACL frame (possibly a frag_list chain) on @queue.
 * The head skb already carries its ACL header; continuation fragments
 * get ACL_CONT headers here.  The whole chain is queued under the
 * queue lock so the TX scheduler never sees a partial packet. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Fragments after the first are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
1987
/* Send ACL data on @chan: stamp the skb with the device, packet type
 * and ACL header, queue it on the channel's data queue, and kick the
 * TX work item. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2004
/* Send SCO data: prepend the SCO header (handle + payload length),
 * queue on the connection's data queue and kick the TX work item. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2027
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of @type that has queued data and the fewest
 * unacked packets, and compute its fair share (*quote) of the
 * controller buffers for that link type.  *quote is 0 when nothing
 * is ready to send. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Fewest packets in flight wins */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Without a dedicated LE buffer pool, LE shares
			 * the ACL buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the budget evenly; always allow at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2089
/* Link TX timeout handler: forcibly disconnect every connection of
 * @type that still has unacked packets.  0x13 is the HCI reason code
 * "remote user terminated connection". */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2110
/* Channel scheduler: among all channels of @type with queued data,
 * select one whose head skb has the highest priority, breaking ties
 * by the fewest packets outstanding on the owning connection.
 * *quote receives that channel's share of the controller buffers.
 * Returns NULL when nothing is ready. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Ignore anything below the best priority so far */
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New best priority: restart the fairness
				 * accounting at this level */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the buffers; at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2189
/* Anti-starvation pass run after a scheduling round: for every channel
 * of @type that sent nothing this round (chan->sent == 0), promote the
 * priority of its head skb towards HCI_PRIO_MAX - 1 so lower-priority
 * traffic is eventually scheduled. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send: reset its counter and
			 * leave its priorities alone */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2239
/* Schedule ACL: drain per-channel ACL queues into the driver while
 * controller buffers (acl_cnt) remain, honoring per-channel quotes
 * and skb priorities; detect stalled links and rebalance priorities
 * afterwards. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: promote starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2289
/* Schedule SCO */
/* Round-robin SCO connections while sco_cnt buffers remain, sending
 * each connection up to its quote of queued frames. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2313
/* Schedule eSCO: same policy as hci_sched_sco() but for ESCO_LINK
 * connections (eSCO shares the SCO buffer count sco_cnt). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2336
/* Schedule LE: like hci_sched_acl() but for LE links.  When the
 * controller has no dedicated LE buffers (le_pkts == 0), LE traffic
 * consumes the ACL buffer count instead. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2387
/* TX work item: run every per-link-type scheduler, then flush any
 * queued raw (unknown type) packets straight to the driver. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2410
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
/* Strip the ACL header, look up the connection by handle and pass the
 * payload to L2CAP.  Frames for unknown handles are logged and freed. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The handle word packs the connection handle and the
	 * packet-boundary/broadcast flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2447
/* SCO data packet */
/* Strip the SCO header, look up the connection by handle and pass the
 * payload to the SCO layer.  Unknown handles are logged and freed. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2478
/* RX work item: drain hdev->rx_q, mirroring frames to monitoring
 * sockets in promiscuous mode, dropping everything in raw mode,
 * holding back data packets during init, and dispatching the rest
 * by packet type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2530
/* Command work item: when the controller has a free command slot
 * (cmd_cnt > 0), dequeue the next command, keep a clone in sent_cmd
 * for hci_sent_cmd_data(), send it, and (re)arm the command timeout.
 * On clone failure the command is requeued and the work rescheduled. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previous command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002561
2562int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2563{
2564 /* General inquiry access code (GIAC) */
2565 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2566 struct hci_cp_inquiry cp;
2567
2568 BT_DBG("%s", hdev->name);
2569
2570 if (test_bit(HCI_INQUIRY, &hdev->flags))
2571 return -EINPROGRESS;
2572
2573 memset(&cp, 0, sizeof(cp));
2574 memcpy(&cp.lap, lap, sizeof(cp.lap));
2575 cp.length = length;
2576
2577 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2578}
Andre Guedes023d50492011-11-04 14:16:52 -03002579
2580int hci_cancel_inquiry(struct hci_dev *hdev)
2581{
2582 BT_DBG("%s", hdev->name);
2583
2584 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2585 return -EPERM;
2586
2587 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2588}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002589
/* Module parameter: toggle Bluetooth High Speed support at load time
 * (also writable at runtime via sysfs, mode 0644). */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");