blob: fea8dad72e3ad68d79128bc0a3fd584815b8f23a [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020058int enable_hs;
59
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
72/* HCI protocols */
73#define HCI_MAX_PROTO 2
74struct hci_proto *hci_proto[HCI_MAX_PROTO];
75
76/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080077static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070078
79/* ---- HCI notifications ---- */
80
81int hci_register_notifier(struct notifier_block *nb)
82{
Alan Sterne041c682006-03-27 01:16:30 -080083 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070084}
85
86int hci_unregister_notifier(struct notifier_block *nb)
87{
Alan Sterne041c682006-03-27 01:16:30 -080088 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089}
90
Marcel Holtmann65164552005-10-28 19:20:48 +020091static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070092{
Alan Sterne041c682006-03-27 01:16:30 -080093 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094}
95
96/* ---- HCI requests ---- */
97
Johan Hedberg23bb5762010-12-21 23:01:27 +020098void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070099{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200100 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
101
Johan Hedberga5040ef2011-01-10 13:28:59 +0200102 /* If this is the init phase check if the completed command matches
103 * the last init command, and if not just return.
104 */
105 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200106 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900127static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100128 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129{
130 DECLARE_WAITQUEUE(wait, current);
131 int err = 0;
132
133 BT_DBG("%s start", hdev->name);
134
135 hdev->req_status = HCI_REQ_PEND;
136
137 add_wait_queue(&hdev->req_wait_q, &wait);
138 set_current_state(TASK_INTERRUPTIBLE);
139
140 req(hdev, opt);
141 schedule_timeout(timeout);
142
143 remove_wait_queue(&hdev->req_wait_q, &wait);
144
145 if (signal_pending(current))
146 return -EINTR;
147
148 switch (hdev->req_status) {
149 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700150 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151 break;
152
153 case HCI_REQ_CANCELED:
154 err = -hdev->req_result;
155 break;
156
157 default:
158 err = -ETIMEDOUT;
159 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700160 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161
Johan Hedberga5040ef2011-01-10 13:28:59 +0200162 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163
164 BT_DBG("%s end: err %d", hdev->name, err);
165
166 return err;
167}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100170 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171{
172 int ret;
173
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
185static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186{
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300190 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192}
193
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200194static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200196 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800197 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200198 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 /* Mandatory initialization */
203
204 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300208 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200213 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200215
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
228 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230
231 /* Optional initialization */
232
233 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200234 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700238 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200240
241 bacpy(&cp.bdaddr, BDADDR_ANY);
242 cp.delete_all = 1;
243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244}
245
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200246static void amp_init(struct hci_dev *hdev)
247{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200250 /* Reset */
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255}
256
257static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
258{
259 struct sk_buff *skb;
260
261 BT_DBG("%s %ld", hdev->name, opt);
262
263 /* Driver initialization */
264
265 /* Special commands */
266 while ((skb = skb_dequeue(&hdev->driver_init))) {
267 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
268 skb->dev = (void *) hdev;
269
270 skb_queue_tail(&hdev->cmd_q, skb);
271 queue_work(hdev->workqueue, &hdev->cmd_work);
272 }
273 skb_queue_purge(&hdev->driver_init);
274
275 switch (hdev->dev_type) {
276 case HCI_BREDR:
277 bredr_init(hdev);
278 break;
279
280 case HCI_AMP:
281 amp_init(hdev);
282 break;
283
284 default:
285 BT_ERR("Unknown device type %d", hdev->dev_type);
286 break;
287 }
288
289}
290
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300291static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
292{
293 BT_DBG("%s", hdev->name);
294
295 /* Read LE buffer size */
296 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
297}
298
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 scan = opt;
302
303 BT_DBG("%s %x", hdev->name, scan);
304
305 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
309static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 auth = opt;
312
313 BT_DBG("%s %x", hdev->name, auth);
314
315 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
319static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 encrypt = opt;
322
323 BT_DBG("%s %x", hdev->name, encrypt);
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327}
328
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200329static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __le16 policy = cpu_to_le16(opt);
332
Marcel Holtmanna418b892008-11-30 12:17:28 +0100333 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200334
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337}
338
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900339/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 * Device is held on return. */
341struct hci_dev *hci_dev_get(int index)
342{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200343 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344
345 BT_DBG("%d", index);
346
347 if (index < 0)
348 return NULL;
349
350 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200351 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
354 break;
355 }
356 }
357 read_unlock(&hci_dev_list_lock);
358 return hdev;
359}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360
361/* ---- Inquiry support ---- */
362static void inquiry_cache_flush(struct hci_dev *hdev)
363{
364 struct inquiry_cache *cache = &hdev->inq_cache;
365 struct inquiry_entry *next = cache->list, *e;
366
367 BT_DBG("cache %p", cache);
368
369 cache->list = NULL;
370 while ((e = next)) {
371 next = e->next;
372 kfree(e);
373 }
374}
375
376struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
377{
378 struct inquiry_cache *cache = &hdev->inq_cache;
379 struct inquiry_entry *e;
380
381 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
382
383 for (e = cache->list; e; e = e->next)
384 if (!bacmp(&e->data.bdaddr, bdaddr))
385 break;
386 return e;
387}
388
389void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
390{
391 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200392 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393
394 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
395
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200396 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
397 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200399 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
400 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200402
403 ie->next = cache->list;
404 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405 }
406
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200407 memcpy(&ie->data, data, sizeof(*data));
408 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409 cache->timestamp = jiffies;
410}
411
412static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
413{
414 struct inquiry_cache *cache = &hdev->inq_cache;
415 struct inquiry_info *info = (struct inquiry_info *) buf;
416 struct inquiry_entry *e;
417 int copied = 0;
418
419 for (e = cache->list; e && copied < num; e = e->next, copied++) {
420 struct inquiry_data *data = &e->data;
421 bacpy(&info->bdaddr, &data->bdaddr);
422 info->pscan_rep_mode = data->pscan_rep_mode;
423 info->pscan_period_mode = data->pscan_period_mode;
424 info->pscan_mode = data->pscan_mode;
425 memcpy(info->dev_class, data->dev_class, 3);
426 info->clock_offset = data->clock_offset;
427 info++;
428 }
429
430 BT_DBG("cache %p, copied %d", cache, copied);
431 return copied;
432}
433
434static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
435{
436 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
437 struct hci_cp_inquiry cp;
438
439 BT_DBG("%s", hdev->name);
440
441 if (test_bit(HCI_INQUIRY, &hdev->flags))
442 return;
443
444 /* Start Inquiry */
445 memcpy(&cp.lap, &ir->lap, 3);
446 cp.length = ir->length;
447 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200448 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700449}
450
451int hci_inquiry(void __user *arg)
452{
453 __u8 __user *ptr = arg;
454 struct hci_inquiry_req ir;
455 struct hci_dev *hdev;
456 int err = 0, do_inquiry = 0, max_rsp;
457 long timeo;
458 __u8 *buf;
459
460 if (copy_from_user(&ir, ptr, sizeof(ir)))
461 return -EFAULT;
462
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200463 hdev = hci_dev_get(ir.dev_id);
464 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465 return -ENODEV;
466
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300467 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900468 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200469 inquiry_cache_empty(hdev) ||
470 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471 inquiry_cache_flush(hdev);
472 do_inquiry = 1;
473 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300474 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475
Marcel Holtmann04837f62006-07-03 10:02:33 +0200476 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200477
478 if (do_inquiry) {
479 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
480 if (err < 0)
481 goto done;
482 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483
484 /* for unlimited number of responses we will use buffer with 255 entries */
485 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
486
487 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
488 * copy it to the user space.
489 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100490 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200491 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492 err = -ENOMEM;
493 goto done;
494 }
495
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300496 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300498 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499
500 BT_DBG("num_rsp %d", ir.num_rsp);
501
502 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
503 ptr += sizeof(ir);
504 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
505 ir.num_rsp))
506 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900507 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508 err = -EFAULT;
509
510 kfree(buf);
511
512done:
513 hci_dev_put(hdev);
514 return err;
515}
516
517/* ---- HCI ioctl helpers ---- */
518
519int hci_dev_open(__u16 dev)
520{
521 struct hci_dev *hdev;
522 int ret = 0;
523
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200524 hdev = hci_dev_get(dev);
525 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526 return -ENODEV;
527
528 BT_DBG("%s %p", hdev->name, hdev);
529
530 hci_req_lock(hdev);
531
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200532 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
533 ret = -ERFKILL;
534 goto done;
535 }
536
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 if (test_bit(HCI_UP, &hdev->flags)) {
538 ret = -EALREADY;
539 goto done;
540 }
541
542 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
543 set_bit(HCI_RAW, &hdev->flags);
544
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200545 /* Treat all non BR/EDR controllers as raw devices if
546 enable_hs is not set */
547 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100548 set_bit(HCI_RAW, &hdev->flags);
549
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 if (hdev->open(hdev)) {
551 ret = -EIO;
552 goto done;
553 }
554
555 if (!test_bit(HCI_RAW, &hdev->flags)) {
556 atomic_set(&hdev->cmd_cnt, 1);
557 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200558 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559
Marcel Holtmann04837f62006-07-03 10:02:33 +0200560 ret = __hci_request(hdev, hci_init_req, 0,
561 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562
Andre Guedeseead27d2011-06-30 19:20:55 -0300563 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300564 ret = __hci_request(hdev, hci_le_init_req, 0,
565 msecs_to_jiffies(HCI_INIT_TIMEOUT));
566
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567 clear_bit(HCI_INIT, &hdev->flags);
568 }
569
570 if (!ret) {
571 hci_dev_hold(hdev);
572 set_bit(HCI_UP, &hdev->flags);
573 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200574 if (!test_bit(HCI_SETUP, &hdev->flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300575 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200576 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300577 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200578 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900579 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200581 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200582 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400583 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584
585 skb_queue_purge(&hdev->cmd_q);
586 skb_queue_purge(&hdev->rx_q);
587
588 if (hdev->flush)
589 hdev->flush(hdev);
590
591 if (hdev->sent_cmd) {
592 kfree_skb(hdev->sent_cmd);
593 hdev->sent_cmd = NULL;
594 }
595
596 hdev->close(hdev);
597 hdev->flags = 0;
598 }
599
600done:
601 hci_req_unlock(hdev);
602 hci_dev_put(hdev);
603 return ret;
604}
605
606static int hci_dev_do_close(struct hci_dev *hdev)
607{
608 BT_DBG("%s %p", hdev->name, hdev);
609
610 hci_req_cancel(hdev, ENODEV);
611 hci_req_lock(hdev);
612
613 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300614 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 hci_req_unlock(hdev);
616 return 0;
617 }
618
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200619 /* Flush RX and TX works */
620 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400621 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200623 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200624 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200625 hdev->discov_timeout = 0;
626 }
627
Johan Hedberg32435532011-11-07 22:16:04 +0200628 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
Johan Hedberge0f93092011-11-09 01:44:22 +0200629 cancel_delayed_work(&hdev->power_off);
Johan Hedberg32435532011-11-07 22:16:04 +0200630
Johan Hedberg7d785252011-12-15 00:47:39 +0200631 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
632 cancel_delayed_work(&hdev->service_cache);
633
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300634 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 inquiry_cache_flush(hdev);
636 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300637 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700638
639 hci_notify(hdev, HCI_DEV_DOWN);
640
641 if (hdev->flush)
642 hdev->flush(hdev);
643
644 /* Reset device */
645 skb_queue_purge(&hdev->cmd_q);
646 atomic_set(&hdev->cmd_cnt, 1);
647 if (!test_bit(HCI_RAW, &hdev->flags)) {
648 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200649 __hci_request(hdev, hci_reset_req, 0,
Szymon Janc43611a72011-10-17 23:05:49 +0200650 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651 clear_bit(HCI_INIT, &hdev->flags);
652 }
653
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200654 /* flush cmd work */
655 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656
657 /* Drop queues */
658 skb_queue_purge(&hdev->rx_q);
659 skb_queue_purge(&hdev->cmd_q);
660 skb_queue_purge(&hdev->raw_q);
661
662 /* Drop last sent command */
663 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300664 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 kfree_skb(hdev->sent_cmd);
666 hdev->sent_cmd = NULL;
667 }
668
669 /* After this point our queues are empty
670 * and no tasks are scheduled. */
671 hdev->close(hdev);
672
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300673 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200674 mgmt_powered(hdev, 0);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300675 hci_dev_unlock(hdev);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200676
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677 /* Clear flags */
678 hdev->flags = 0;
679
680 hci_req_unlock(hdev);
681
682 hci_dev_put(hdev);
683 return 0;
684}
685
686int hci_dev_close(__u16 dev)
687{
688 struct hci_dev *hdev;
689 int err;
690
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200691 hdev = hci_dev_get(dev);
692 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693 return -ENODEV;
694 err = hci_dev_do_close(hdev);
695 hci_dev_put(hdev);
696 return err;
697}
698
699int hci_dev_reset(__u16 dev)
700{
701 struct hci_dev *hdev;
702 int ret = 0;
703
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200704 hdev = hci_dev_get(dev);
705 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700706 return -ENODEV;
707
708 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709
710 if (!test_bit(HCI_UP, &hdev->flags))
711 goto done;
712
713 /* Drop queues */
714 skb_queue_purge(&hdev->rx_q);
715 skb_queue_purge(&hdev->cmd_q);
716
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300717 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 inquiry_cache_flush(hdev);
719 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300720 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700721
722 if (hdev->flush)
723 hdev->flush(hdev);
724
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900725 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300726 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727
728 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200729 ret = __hci_request(hdev, hci_reset_req, 0,
730 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731
732done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700733 hci_req_unlock(hdev);
734 hci_dev_put(hdev);
735 return ret;
736}
737
738int hci_dev_reset_stat(__u16 dev)
739{
740 struct hci_dev *hdev;
741 int ret = 0;
742
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200743 hdev = hci_dev_get(dev);
744 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700745 return -ENODEV;
746
747 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
748
749 hci_dev_put(hdev);
750
751 return ret;
752}
753
754int hci_dev_cmd(unsigned int cmd, void __user *arg)
755{
756 struct hci_dev *hdev;
757 struct hci_dev_req dr;
758 int err = 0;
759
760 if (copy_from_user(&dr, arg, sizeof(dr)))
761 return -EFAULT;
762
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200763 hdev = hci_dev_get(dr.dev_id);
764 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765 return -ENODEV;
766
767 switch (cmd) {
768 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200769 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
770 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 break;
772
773 case HCISETENCRYPT:
774 if (!lmp_encrypt_capable(hdev)) {
775 err = -EOPNOTSUPP;
776 break;
777 }
778
779 if (!test_bit(HCI_AUTH, &hdev->flags)) {
780 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200781 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
782 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 if (err)
784 break;
785 }
786
Marcel Holtmann04837f62006-07-03 10:02:33 +0200787 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
788 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789 break;
790
791 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200792 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
793 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700794 break;
795
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200796 case HCISETLINKPOL:
797 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
798 msecs_to_jiffies(HCI_INIT_TIMEOUT));
799 break;
800
801 case HCISETLINKMODE:
802 hdev->link_mode = ((__u16) dr.dev_opt) &
803 (HCI_LM_MASTER | HCI_LM_ACCEPT);
804 break;
805
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806 case HCISETPTYPE:
807 hdev->pkt_type = (__u16) dr.dev_opt;
808 break;
809
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200811 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
812 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 break;
814
815 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200816 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
817 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700818 break;
819
820 default:
821 err = -EINVAL;
822 break;
823 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200824
Linus Torvalds1da177e2005-04-16 15:20:36 -0700825 hci_dev_put(hdev);
826 return err;
827}
828
/* Handle the HCIGETDEVLIST ioctl: report the id and flags of up to
 * dev_num registered HCI devices back to user space.
 *
 * arg points to a struct hci_dev_list_req in user memory whose dev_num
 * field caps how many entries the caller can receive.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Reject zero and any count whose kernel buffer would exceed
	 * two pages. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing devices counts as userspace activity: abort a
		 * pending automatic power-off for each reported device. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects devices to be
		 * pairable by default. */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Copy out only the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
875
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * device id supplied by user space.  Returns 0 or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as userspace activity: abort a
	 * pending automatic power-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable by default. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
917
918/* ---- Interface to HCI drivers ---- */
919
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200920static int hci_rfkill_set_block(void *data, bool blocked)
921{
922 struct hci_dev *hdev = data;
923
924 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
925
926 if (!blocked)
927 return 0;
928
929 hci_dev_do_close(hdev);
930
931 return 0;
932}
933
/* rfkill operations for Bluetooth adapters; only blocking is handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
937
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938/* Alloc HCI device */
939struct hci_dev *hci_alloc_dev(void)
940{
941 struct hci_dev *hdev;
942
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200943 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944 if (!hdev)
945 return NULL;
946
David Herrmann0ac7e702011-10-08 14:58:47 +0200947 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948 skb_queue_head_init(&hdev->driver_init);
949
950 return hdev;
951}
952EXPORT_SYMBOL(hci_alloc_dev);
953
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver-queued init frames still pending. */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
963
/* Worker that brings an adapter up (scheduled from hci_register_dev).
 * If the device is flagged HCI_AUTO_OFF, a delayed power-off is armed
 * so an unclaimed device shuts itself down after AUTO_OFF_TIMEOUT. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on ends the setup phase; tell mgmt
	 * the controller index is now usable. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
980
/* Delayed worker that powers the adapter down again after the
 * auto-off timeout expired without userspace claiming the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The timeout fired, so auto-off is no longer pending. */
	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
992
/* Delayed worker ending a time-limited discoverable period: write
 * SCAN_PAGE so the device stays connectable but stops answering
 * inquiries, and reset the stored timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1010
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001011int hci_uuids_clear(struct hci_dev *hdev)
1012{
1013 struct list_head *p, *n;
1014
1015 list_for_each_safe(p, n, &hdev->uuids) {
1016 struct bt_uuid *uuid;
1017
1018 uuid = list_entry(p, struct bt_uuid, list);
1019
1020 list_del(p);
1021 kfree(uuid);
1022 }
1023
1024 return 0;
1025}
1026
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001027int hci_link_keys_clear(struct hci_dev *hdev)
1028{
1029 struct list_head *p, *n;
1030
1031 list_for_each_safe(p, n, &hdev->link_keys) {
1032 struct link_key *key;
1033
1034 key = list_entry(p, struct link_key, list);
1035
1036 list_del(p);
1037 kfree(key);
1038 }
1039
1040 return 0;
1041}
1042
1043struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1044{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001045 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001046
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001047 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001048 if (bacmp(bdaddr, &k->bdaddr) == 0)
1049 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001050
1051 return NULL;
1052}
1053
/* Decide whether a link key should be stored persistently.
 *
 * Returns 1 when the key should survive beyond the connection and 0
 * when it should be dropped.  The decision is based on the key type,
 * the previous key type (0xff meaning "none") and the authentication
 * requirements of both sides of the connection, if one exists.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1089
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001090struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1091{
1092 struct link_key *k;
1093
1094 list_for_each_entry(k, &hdev->link_keys, list) {
1095 struct key_master_id *id;
1096
1097 if (k->type != HCI_LK_SMP_LTK)
1098 continue;
1099
1100 if (k->dlen != sizeof(*id))
1101 continue;
1102
1103 id = (void *) &k->data;
1104 if (id->ediv == ediv &&
1105 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1106 return k;
1107 }
1108
1109 return NULL;
1110}
1111EXPORT_SYMBOL(hci_find_ltk);
1112
1113struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1114 bdaddr_t *bdaddr, u8 type)
1115{
1116 struct link_key *k;
1117
1118 list_for_each_entry(k, &hdev->link_keys, list)
1119 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1120 return k;
1121
1122 return NULL;
1123}
1124EXPORT_SYMBOL(hci_find_link_key_type);
1125
/* Store (or update in place) the link key for bdaddr.
 *
 * new_key is non-zero when the controller just generated this key (as
 * opposed to one restored from storage): only then is mgmt notified
 * and the persistence decision applied — a non-persistent key is
 * freed again right after the notification.  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the previous key type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are only reported, never kept. */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1180
/* Store (or update in place) an SMP long term key for bdaddr,
 * together with its master identification (ediv + rand).  The LTK is
 * kept in the same list as BR/EDR link keys, tagged HCI_LK_SMP_LTK,
 * with the master id in the flexible data area.  Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Extra space after the key holds the master id. */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	/* The negotiated encryption key size is stored in pin_len. */
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): hci_add_link_key passes a persistent flag as
	 * the third mgmt_new_link_key argument, while old_key_type is
	 * passed here — confirm this is intended. */
	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1218
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001219int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1220{
1221 struct link_key *key;
1222
1223 key = hci_find_link_key(hdev, bdaddr);
1224 if (!key)
1225 return -ENOENT;
1226
1227 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1228
1229 list_del(&key->list);
1230 kfree(key);
1231
1232 return 0;
1233}
1234
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* Pretend the controller acknowledged the lost command so the
	 * command work can make progress again. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1244
Szymon Janc2763eda2011-03-22 13:12:22 +01001245struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1246 bdaddr_t *bdaddr)
1247{
1248 struct oob_data *data;
1249
1250 list_for_each_entry(data, &hdev->remote_oob_data, list)
1251 if (bacmp(bdaddr, &data->bdaddr) == 0)
1252 return data;
1253
1254 return NULL;
1255}
1256
1257int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1258{
1259 struct oob_data *data;
1260
1261 data = hci_find_remote_oob_data(hdev, bdaddr);
1262 if (!data)
1263 return -ENOENT;
1264
1265 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1266
1267 list_del(&data->list);
1268 kfree(data);
1269
1270 return 0;
1271}
1272
1273int hci_remote_oob_data_clear(struct hci_dev *hdev)
1274{
1275 struct oob_data *data, *n;
1276
1277 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1278 list_del(&data->list);
1279 kfree(data);
1280 }
1281
1282 return 0;
1283}
1284
1285int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1286 u8 *randomizer)
1287{
1288 struct oob_data *data;
1289
1290 data = hci_find_remote_oob_data(hdev, bdaddr);
1291
1292 if (!data) {
1293 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1294 if (!data)
1295 return -ENOMEM;
1296
1297 bacpy(&data->bdaddr, bdaddr);
1298 list_add(&data->list, &hdev->remote_oob_data);
1299 }
1300
1301 memcpy(data->hash, hash, sizeof(data->hash));
1302 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1303
1304 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1305
1306 return 0;
1307}
1308
Antti Julkub2a66aa2011-06-15 12:01:14 +03001309struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1310 bdaddr_t *bdaddr)
1311{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001312 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001313
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001314 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001315 if (bacmp(bdaddr, &b->bdaddr) == 0)
1316 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001317
1318 return NULL;
1319}
1320
1321int hci_blacklist_clear(struct hci_dev *hdev)
1322{
1323 struct list_head *p, *n;
1324
1325 list_for_each_safe(p, n, &hdev->blacklist) {
1326 struct bdaddr_list *b;
1327
1328 b = list_entry(p, struct bdaddr_list, list);
1329
1330 list_del(p);
1331 kfree(b);
1332 }
1333
1334 return 0;
1335}
1336
1337int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1338{
1339 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001340
1341 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1342 return -EBADF;
1343
Antti Julku5e762442011-08-25 16:48:02 +03001344 if (hci_blacklist_lookup(hdev, bdaddr))
1345 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001346
1347 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001348 if (!entry)
1349 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001350
1351 bacpy(&entry->bdaddr, bdaddr);
1352
1353 list_add(&entry->list, &hdev->blacklist);
1354
Johan Hedberg744cf192011-11-08 20:40:14 +02001355 return mgmt_device_blocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001356}
1357
1358int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1359{
1360 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001361
Szymon Janc1ec918c2011-11-16 09:32:21 +01001362 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001363 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001364
1365 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001366 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001367 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001368
1369 list_del(&entry->list);
1370 kfree(entry);
1371
Johan Hedberg744cf192011-11-08 20:40:14 +02001372 return mgmt_device_unblocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001373}
1374
/* Delayed worker that flushes the LE advertising entry cache under
 * the device lock. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1386
Andre Guedes76c86862011-05-26 16:23:50 -03001387int hci_adv_entries_clear(struct hci_dev *hdev)
1388{
1389 struct adv_entry *entry, *tmp;
1390
1391 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1392 list_del(&entry->list);
1393 kfree(entry);
1394 }
1395
1396 BT_DBG("%s adv cache cleared", hdev->name);
1397
1398 return 0;
1399}
1400
1401struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1402{
1403 struct adv_entry *entry;
1404
1405 list_for_each_entry(entry, &hdev->adv_entries, list)
1406 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1407 return entry;
1408
1409 return NULL;
1410}
1411
1412static inline int is_connectable_adv(u8 evt_type)
1413{
1414 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1415 return 1;
1416
1417 return 0;
1418}
1419
1420int hci_add_adv_entry(struct hci_dev *hdev,
1421 struct hci_ev_le_advertising_info *ev)
1422{
1423 struct adv_entry *entry;
1424
1425 if (!is_connectable_adv(ev->evt_type))
1426 return -EINVAL;
1427
1428 /* Only new entries should be added to adv_entries. So, if
1429 * bdaddr was found, don't add it. */
1430 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1431 return 0;
1432
1433 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1434 if (!entry)
1435 return -ENOMEM;
1436
1437 bacpy(&entry->bdaddr, &ev->bdaddr);
1438 entry->bdaddr_type = ev->bdaddr_type;
1439
1440 list_add(&entry->list, &hdev->adv_entries);
1441
1442 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1443 batostr(&entry->bdaddr), entry->bdaddr_type);
1444
1445 return 0;
1446}
1447
/* Register HCI device
 *
 * Picks the first free device id (AMP controllers start searching at
 * 1 so index 0 can double as the AMP controller ID), initializes all
 * queues, work items, timers and lists, creates the per-device
 * workqueue and sysfs entries, optionally hooks up rfkill, and
 * schedules an automatic power-on.  Returns the assigned id or a
 * negative errno; on failure the device is unlinked again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply these callbacks. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after 'head' so the list stays sorted by id. */
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue dedicated to this
	 * device's rx/tx/cmd work. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional; registration failure is not fatal. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices come up automatically and power off again if
	 * userspace does not claim them (see hci_power_on). */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1575
/* Unregister HCI device
 *
 * Reverses hci_register_dev: unlink from the global list, shut the
 * device down, tell mgmt, tear down rfkill/sysfs/workqueue, free all
 * cached per-device state, and drop the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce the removal if the device ever finished setup. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1627
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Broadcast the suspend notification; always succeeds. */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1635
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Broadcast the resume notification; always succeeds. */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1643
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Frames are only accepted while the device is up or still
	 * initializing; otherwise the skb is dropped. */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incomming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Processing continues asynchronously in hci_rx_work. */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1666
/* Incrementally reassemble one HCI packet of the given type from a
 * raw byte stream.
 *
 * Partial frames live in hdev->reassembly[index] between calls.  A
 * completed frame is handed to hci_recv_frame().  Returns the number
 * of input bytes left unconsumed (>= 0) or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: size the buffer for the largest
		 * possible packet of this type and expect the header
		 * first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the full header has arrived, read the payload
		 * length from it and extend the expected byte count;
		 * a length exceeding the buffer aborts the frame. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1775
Marcel Holtmannef222012007-07-11 06:42:04 +02001776int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1777{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301778 int rem = 0;
1779
Marcel Holtmannef222012007-07-11 06:42:04 +02001780 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1781 return -EILSEQ;
1782
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001783 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001784 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301785 if (rem < 0)
1786 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001787
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301788 data += (count - rem);
1789 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001790 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001791
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301792 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001793}
1794EXPORT_SYMBOL(hci_recv_fragment);
1795
Suraj Sumangala99811512010-07-14 13:02:19 +05301796#define STREAM_REASSEMBLY 0
1797
1798int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1799{
1800 int type;
1801 int rem = 0;
1802
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001803 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301804 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1805
1806 if (!skb) {
1807 struct { char type; } *pkt;
1808
1809 /* Start of the frame */
1810 pkt = data;
1811 type = pkt->type;
1812
1813 data++;
1814 count--;
1815 } else
1816 type = bt_cb(skb)->pkt_type;
1817
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001818 rem = hci_reassembly(hdev, type, data, count,
1819 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301820 if (rem < 0)
1821 return rem;
1822
1823 data += (count - rem);
1824 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001825 }
Suraj Sumangala99811512010-07-14 13:02:19 +05301826
1827 return rem;
1828}
1829EXPORT_SYMBOL(hci_recv_stream_fragment);
1830
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831/* ---- Interface to upper protocols ---- */
1832
Ulisses Furquimf2d64f62011-12-20 17:10:51 -02001833/* Register/Unregister protocols. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834int hci_register_proto(struct hci_proto *hp)
1835{
1836 int err = 0;
1837
1838 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1839
1840 if (hp->id >= HCI_MAX_PROTO)
1841 return -EINVAL;
1842
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843 if (!hci_proto[hp->id])
1844 hci_proto[hp->id] = hp;
1845 else
1846 err = -EEXIST;
1847
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 return err;
1849}
1850EXPORT_SYMBOL(hci_register_proto);
1851
1852int hci_unregister_proto(struct hci_proto *hp)
1853{
1854 int err = 0;
1855
1856 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1857
1858 if (hp->id >= HCI_MAX_PROTO)
1859 return -EINVAL;
1860
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 if (hci_proto[hp->id])
1862 hci_proto[hp->id] = NULL;
1863 else
1864 err = -ENOENT;
1865
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 return err;
1867}
1868EXPORT_SYMBOL(hci_unregister_proto);
1869
/* Add an HCI callback set to the global hci_cb_list.
 *
 * The list is protected by hci_cb_list_lock; the _bh lock variant keeps
 * bottom halves disabled while the lock is held.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
1880EXPORT_SYMBOL(hci_register_cb);
1881
/* Remove an HCI callback set from the global hci_cb_list.
 *
 * Counterpart to hci_register_cb(); takes the same list write lock with
 * bottom halves disabled.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
1892EXPORT_SYMBOL(hci_unregister_cb);
1893
/* Hand one frame to the transport driver.
 *
 * Consumes @skb in every path: it is freed when no device is attached,
 * otherwise ownership passes to hdev->send().  The target device is
 * carried in skb->dev.  When promiscuous listeners exist, the frame is
 * also forwarded to the HCI sockets with a fresh timestamp before it
 * leaves the host.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1917
/* Build an HCI command packet and queue it for transmission.
 *
 * @opcode: command opcode (stored little-endian on the wire)
 * @plen:   length of @param in bytes
 * @param:  command parameters, copied into the skb (may be ignored when
 *          @plen is 0)
 *
 * The command is appended to hdev->cmd_q and the cmd_work item is
 * scheduled; actual transmission and flow control happen there.
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
1954/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001955void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956{
1957 struct hci_command_hdr *hdr;
1958
1959 if (!hdev->sent_cmd)
1960 return NULL;
1961
1962 hdr = (void *) hdev->sent_cmd->data;
1963
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001964 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 return NULL;
1966
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001967 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
1969 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1970}
1971
1972/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to @skb.
 *
 * The payload length is captured before skb_push() so hdr->dlen covers
 * the payload only, not the header itself.  Both header fields are
 * stored little-endian.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1984
/* Queue an (possibly fragmented) ACL skb on @queue.
 *
 * A non-fragmented skb already carries its ACL header (added by the
 * caller) and is simply appended.  For a fragmented skb, the frag_list
 * is detached and every continuation fragment gets its own ACL header
 * with ACL_START cleared and ACL_CONT set; the whole chain is appended
 * under the queue lock so no other frame can interleave with it.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments are marked ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2025
/* Send ACL data on a channel.
 *
 * Tags @skb with the owning device and packet type, prepends the ACL
 * header for the first fragment, queues the frame (and any fragments)
 * on the channel's data queue, then kicks the TX work item to actually
 * transmit.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2041EXPORT_SYMBOL(hci_send_acl);
2042
/* Send SCO data on a connection.
 *
 * Builds the SCO header (handle little-endian, dlen taken from the
 * payload length before the push), prepends it to @skb, queues the
 * frame on the connection's data queue and schedules the TX work item.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2064EXPORT_SYMBOL(hci_send_sco);
2065
2066/* ---- HCI TX task (outgoing data) ---- */
2067
2068/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest frames
 * in flight (fairness by least-sent), and compute its TX quota.
 *
 * The quota is the controller's free buffer count for the link type
 * divided by the number of eligible connections, with a minimum of 1.
 * *quote is set to 0 when no eligible connection exists.  The hash is
 * walked under rcu_read_lock().
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest unacked frames */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Without a dedicated LE buffer pool, LE shares ACL */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2127
/* TX timeout handler for one link type: disconnect every connection of
 * @type that still has unacknowledged frames (c->sent != 0), on the
 * assumption that the link has stalled.  0x13 is the HCI "Remote User
 * Terminated Connection" reason code.
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2148
/* Pick the best channel of link type @type to transmit from next, and
 * compute its TX quota in *quote.
 *
 * Selection is two-level: among all channels with queued data, only
 * those whose head skb carries the highest priority seen so far compete
 * (lower-priority channels are skipped, and the candidate set resets
 * whenever a higher priority appears); within that set, the channel on
 * the connection with the fewest frames in flight wins.  Returns NULL
 * when nothing is eligible.  Walked under rcu_read_lock().
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Only the highest priority seen so far competes */
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority found: restart selection */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Fairness: fewest in-flight frames wins */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Without a dedicated LE buffer pool, LE shares ACL */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Share the free controller buffers among competing channels */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2227
/* Anti-starvation pass after a TX round: promote the head skb of every
 * channel of @type that sent nothing this round (chan->sent == 0) to
 * HCI_PRIO_MAX - 1 so it can compete in the next round; channels that
 * did send have their per-round counter reset instead.  Walked under
 * rcu_read_lock().
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send: just reset its round counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2277
/* Schedule ACL transmissions.
 *
 * Detects a stalled link first (no free controller buffers for longer
 * than the 40.9 s maximum link supervision timeout), then repeatedly
 * picks the best channel via hci_chan_sent() and sends up to its quota
 * of frames, stopping early if the head skb's priority drops below the
 * priority the quota was computed for.  If anything was sent, a
 * priority recalculation pass prevents starvation of idle channels.
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the starting buffer count to detect any transmission */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2327
2328/* Schedule SCO */
/* Schedule SCO transmissions: while controller SCO buffers are free,
 * pick the least-busy SCO connection and send up to its quota of
 * queued frames.  The per-connection sent counter wraps back to 0 at
 * ~0 instead of overflowing.
 */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2351
/* Schedule eSCO transmissions.  Identical structure to hci_sched_sco()
 * but for ESCO_LINK connections; eSCO shares the controller's SCO
 * buffer count (hdev->sco_cnt).
 */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2374
/* Schedule LE transmissions.
 *
 * Same channel/priority scheme as hci_sched_acl(), but the buffer pool
 * depends on the controller: with a dedicated LE pool (le_pkts != 0)
 * the LE counters are used, otherwise LE traffic consumes ACL buffers.
 * The working count is written back to the matching counter at the end,
 * and a priority recalculation runs if anything was sent.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* No dedicated LE buffers: borrow from the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining buffer count back to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2425
/* TX work item: run each link-type scheduler in fixed order
 * (ACL, SCO, eSCO, LE) and then flush any raw/unknown-type packets
 * queued on hdev->raw_q straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2448
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002449/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450
2451/* ACL data packet */
/* Process one inbound ACL data packet.
 *
 * Strips the ACL header, splits the handle field into connection handle
 * and packet-boundary/broadcast flags, looks up the connection (under
 * the device lock only for the lookup itself) and hands the payload to
 * the registered L2CAP protocol handler, which then owns the skb.  If
 * no connection or handler exists, the skb is freed here.
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2490
2491/* SCO data packet */
/* Process one inbound SCO data packet.
 *
 * Strips the SCO header, looks up the connection by handle (device lock
 * held only for the lookup) and passes the payload to the registered
 * SCO protocol handler, which then owns the skb.  If no connection or
 * handler exists, the skb is freed here.
 */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2526
/* RX work item: drain hdev->rx_q and dispatch each frame.
 *
 * Promiscuous listeners get a copy first.  In HCI_RAW mode every frame
 * is dropped here (userspace handles the device directly); during
 * HCI_INIT only event packets are processed, data packets are dropped.
 * Remaining frames are dispatched by packet type; unknown types are
 * freed.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2578
/* Command work item: transmit the next queued HCI command, subject to
 * controller command flow control (hdev->cmd_cnt credits).
 *
 * A clone of the command is kept in hdev->sent_cmd so its parameters
 * can be retrieved when the completion event arrives (see
 * hci_sent_cmd_data()).  The command timer is armed to catch a missing
 * response, unless a reset is in flight.  If cloning fails the command
 * is requeued at the head and the work item rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously retained command clone, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002609
2610int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2611{
2612 /* General inquiry access code (GIAC) */
2613 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2614 struct hci_cp_inquiry cp;
2615
2616 BT_DBG("%s", hdev->name);
2617
2618 if (test_bit(HCI_INQUIRY, &hdev->flags))
2619 return -EINPROGRESS;
2620
2621 memset(&cp, 0, sizeof(cp));
2622 memcpy(&cp.lap, lap, sizeof(cp.lap));
2623 cp.length = length;
2624
2625 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2626}
Andre Guedes023d50492011-11-04 14:16:52 -03002627
/* Cancel a running inquiry.  Returns -EPERM when no inquiry is in
 * progress, otherwise the result of queueing the
 * HCI_OP_INQUIRY_CANCEL command.
 */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002637
2638module_param(enable_hs, bool, 0644);
2639MODULE_PARM_DESC(enable_hs, "Enable High Speed");