/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

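/* Complete a pending synchronous request: record the result and wake up
 * the caller sleeping in __hci_request(). */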
void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

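/* Controller bring-up: send any driver-queued setup commands first, then
 * the mandatory and optional HCI initialization commands. */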
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
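/* The inquiry cache is a singly-linked list of remote device records,
 * updated on every inquiry result and timestamped for aging. */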
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

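/* HCIINQUIRY ioctl handler: flush a stale cache, run the inquiry request
 * synchronously, then copy the cached results back to user space. */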
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

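/* Bring the device up: honour rfkill and quirks, call the driver's open()
 * and, for non-raw devices, run the HCI init sequence before setting HCI_UP. */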
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	mgmt_index_added(hdev->id);
	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	mgmt_index_removed(hdev->id);
	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

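/* Reassemble a partial HCI packet: on the first fragment an skb sized for
 * the packet type is allocated, then data is appended until the expected
 * length (taken from the packet header) is reached and the complete frame
 * is handed to hci_recv_frame(). Returns the number of input bytes not yet
 * consumed, or a negative error. */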
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

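/* Byte-stream variant for drivers (e.g. UART transports) that deliver raw
 * bytes: the first byte of each frame carries the packet type, and a single
 * shared reassembly slot is used for the whole stream. */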
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

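/* Hand one frame to the driver; in promiscuous mode a timestamped copy is
 * delivered to the raw HCI sockets first. */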
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

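/* Queue an ACL frame on a connection: the head skb is marked ACL_START and
 * any frag_list fragments are re-queued individually as ACL_CONT, so the
 * fragments reach the controller as one continuous higher-layer PDU. */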
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
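/* Pick the connection of the given type with the fewest in-flight packets
 * and grant it a quote: an equal share of the free controller buffers,
 * but at least one packet. */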
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

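/* ACL transmit timeout: no buffer credits have come back from the
 * controller, so disconnect every ACL link that still has packets
 * outstanding. */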
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

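/* Command scheduler: cmd_cnt tracks how many commands the controller will
 * currently accept. A credit is taken here for each command sent and given
 * back when the completion event arrives; a controller that returns no
 * credit for a second is forcibly unblocked. */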
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}