blob: bf6729a53378347b68208b269aa63846b07141f4 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur824530212008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <net/sock.h>
45
46#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020047#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <asm/unaligned.h>
49
50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h>
52
Johan Hedbergab81cbf2010-12-15 13:53:18 +020053#define AUTO_OFF_TIMEOUT 2000
54
Linus Torvalds1da177e2005-04-16 15:20:36 -070055static void hci_cmd_task(unsigned long arg);
56static void hci_rx_task(unsigned long arg);
57static void hci_tx_task(unsigned long arg);
58static void hci_notify(struct hci_dev *hdev, int event);
59
60static DEFINE_RWLOCK(hci_task_lock);
61
62/* HCI device list */
63LIST_HEAD(hci_dev_list);
64DEFINE_RWLOCK(hci_dev_list_lock);
65
66/* HCI callback list */
67LIST_HEAD(hci_cb_list);
68DEFINE_RWLOCK(hci_cb_list_lock);
69
70/* HCI protocols */
71#define HCI_MAX_PROTO 2
72struct hci_proto *hci_proto[HCI_MAX_PROTO];
73
74/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080075static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
77/* ---- HCI notifications ---- */
78
/* Register a callback to be invoked on HCI core events (via hci_notify). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
83
/* Remove a callback previously added with hci_register_notifier(). */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
88
/* Run every registered notifier callback for @event, passing @hdev. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
93
94/* ---- HCI requests ---- */
95
/* Called from the event path when command @cmd finished with @result.
 * Completes a synchronous request started by __hci_request() by storing
 * the result and waking the sleeping requester. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
112
/* Abort a pending synchronous request with error @err (positive errno);
 * __hci_request() negates req_result on the HCI_REQ_CANCELED path. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
123
124/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900125static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126 unsigned long opt, __u32 timeout)
127{
128 DECLARE_WAITQUEUE(wait, current);
129 int err = 0;
130
131 BT_DBG("%s start", hdev->name);
132
133 hdev->req_status = HCI_REQ_PEND;
134
135 add_wait_queue(&hdev->req_wait_q, &wait);
136 set_current_state(TASK_INTERRUPTIBLE);
137
138 req(hdev, opt);
139 schedule_timeout(timeout);
140
141 remove_wait_queue(&hdev->req_wait_q, &wait);
142
143 if (signal_pending(current))
144 return -EINTR;
145
146 switch (hdev->req_status) {
147 case HCI_REQ_DONE:
148 err = -bt_err(hdev->req_result);
149 break;
150
151 case HCI_REQ_CANCELED:
152 err = -hdev->req_result;
153 break;
154
155 default:
156 err = -ETIMEDOUT;
157 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159
Johan Hedberga5040ef2011-01-10 13:28:59 +0200160 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161
162 BT_DBG("%s end: err %d", hdev->name, err);
163
164 return err;
165}
166
/* Serialized wrapper around __hci_request(): rejects requests while the
 * device is down and takes the per-device request lock. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
182
/* Request callback: queue an HCI_Reset command to the controller. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
190
/* Request callback run during device bring-up (under HCI_INIT): queues
 * the driver's pre-init commands followed by the mandatory and optional
 * controller initialization sequence.  Completion is tracked per-command
 * via hdev->init_last_cmd in hci_req_complete(). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	/* 0x7d00 = 32000 baseband slots * 0.625 ms = 20 s */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys from the controller; the host (mgmt)
	 * is the authority for link keys from here on. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
265
/* Request callback: set inquiry/page scan enable bits from @opt. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
275
/* Request callback: enable/disable authentication per @opt. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
285
/* Request callback: enable/disable link-level encryption per @opt. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
295
/* Request callback: set the default link policy (little-endian on wire). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
305
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900306/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307 * Device is held on return. */
/* Get HCI device by index.
 * Device is held on return; caller must drop it with hci_dev_put().
 * Returns NULL if @index is negative or no device matches. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329
330/* ---- Inquiry support ---- */
/* Free every entry in the inquiry cache (singly linked via e->next).
 * Callers hold the device lock — see hci_inquiry()/hci_dev_do_close(). */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
344
/* Find the cached inquiry entry for @bdaddr, or NULL if not cached. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
357
/* Insert or refresh the cache entry for @data->bdaddr and bump the
 * timestamps.  GFP_ATOMIC because this runs from the RX tasklet path;
 * allocation failure silently drops the result. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		/* Prepend to the singly linked cache list */
		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
380
/* Copy up to @num cached entries into @buf as struct inquiry_info
 * records; returns the number copied.  Must not sleep (called under
 * the device lock with a preallocated buffer). */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
402
/* Request callback: start an inquiry using the parameters passed in
 * @opt (a struct hci_inquiry_req *).  No-op if one is already running. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
419
/* HCIINQUIRY ioctl helper: optionally run a fresh inquiry (when the
 * cache is stale/empty or IREQ_CACHE_FLUSH is set), then copy the
 * cached results back to user space after the request structure. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28 s inquiry units; ~2 s of jiffies per unit
	 * gives the request a comfortable margin over the inquiry itself */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the entries */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
484
485/* ---- HCI ioctl helpers ---- */
486
/* Bring up HCI device @dev: open the transport, run the init request
 * sequence (unless the device is raw), and mark it HCI_UP.  On init
 * failure all tasklets/queues are torn down again.  Order of the
 * cleanup steps mirrors hci_dev_do_close(). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During mgmt-driven setup the powered event is deferred */
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
565
/* Shut down a running device: stop the tasklets, flush caches and
 * connections, soft-reset the controller, drain all queues and release
 * the reference taken in hci_dev_open().  Idempotent — returns 0
 * immediately if the device is not HCI_UP. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* Wake anyone sleeping in __hci_request() before taking the lock */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		/* Best-effort reset with a short timeout; errors ignored */
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
630
/* ioctl-level close: resolve the device id and delegate to
 * hci_dev_do_close(), balancing the lookup reference. */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
643
/* HCIDEVRESET ioctl helper: drain queues, flush caches/connections and
 * issue an HCI reset while keeping the device up.  The TX tasklet is
 * disabled (not killed) so traffic resumes after the reset. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again and clear flow-control counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
684
/* HCIDEVRESTAT ioctl helper: zero the device's byte/packet counters. */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
700
/* Dispatch the per-device HCISET* ioctls.  dr.dev_opt carries either a
 * simple scalar or, for the MTU ioctls, two packed __u16 values
 * (NOTE(review): the pointer-cast unpacking assumes host endianness and
 * matching userspace layout — long-standing ABI, do not change). */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs: high half = MTU, low half = packet count */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
775
/* HCIGETDEVLIST ioctl helper: copy id/flags for up to dev_num devices
 * to user space.  Also cancels pending auto-off timers and marks
 * non-mgmt-controlled devices pairable (legacy-API access implies a
 * legacy user is in charge). */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to a sane size */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
825
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info snapshot for
 * one device and copy it back to user space. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy-API access: keep the device on and pairable (see
	 * hci_get_dev_list() for the same convention) */
	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble = bus type, high nibble = controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
866
867/* ---- Interface to HCI drivers ---- */
868
/* rfkill callback: power the device down when the switch blocks it.
 * Unblocking is a no-op here — hci_dev_open() re-checks rfkill state. */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}
882
/* rfkill operations registered for each HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
886
Linus Torvalds1da177e2005-04-16 15:20:36 -0700887/* Alloc HCI device */
/* Alloc HCI device */
/* Allocate a zeroed hci_dev and initialize the driver-init queue.
 * Returns NULL on allocation failure; free with hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
901
902/* Free HCI device */
/* Free HCI device */
/* Drop any queued driver-init skbs and release the embedded struct
 * device reference; the memory is freed by the device release hook. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
911
/* Workqueue handler: power the device on; if auto-off is armed,
 * schedule the off timer, and signal mgmt when initial setup is done. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
928
/* Workqueue handler: close the device (scheduled by the auto-off timer). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
937
/* Timer callback: defer the actual power-off to process context via the
 * workqueue, since hci_dev_close() may sleep. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
948
/* Cancel a pending auto power-off: clear the flag first so a timer
 * that already fired will not queue power_off, then delete the timer
 * itself. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
956
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200957int hci_uuids_clear(struct hci_dev *hdev)
958{
959 struct list_head *p, *n;
960
961 list_for_each_safe(p, n, &hdev->uuids) {
962 struct bt_uuid *uuid;
963
964 uuid = list_entry(p, struct bt_uuid, list);
965
966 list_del(p);
967 kfree(uuid);
968 }
969
970 return 0;
971}
972
Johan Hedberg55ed8ca12011-01-17 14:41:05 +0200973int hci_link_keys_clear(struct hci_dev *hdev)
974{
975 struct list_head *p, *n;
976
977 list_for_each_safe(p, n, &hdev->link_keys) {
978 struct link_key *key;
979
980 key = list_entry(p, struct link_key, list);
981
982 list_del(p);
983 kfree(key);
984 }
985
986 return 0;
987}
988
989struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
990{
991 struct list_head *p;
992
993 list_for_each(p, &hdev->link_keys) {
994 struct link_key *k;
995
996 k = list_entry(p, struct link_key, list);
997
998 if (bacmp(bdaddr, &k->bdaddr) == 0)
999 return k;
1000 }
1001
1002 return NULL;
1003}
1004
/* Store (or update) the link key for @bdaddr.
 *
 * An existing entry for the address is reused; otherwise a new one is
 * allocated atomically and added to the list. When @new_key is set,
 * the management interface is notified of the key, together with the
 * previous key's type (0xff when there was none).
 *
 * The final type == 0x06 check deliberately runs AFTER mgmt_new_key():
 * 0x06 is the Changed Combination Key type (Bluetooth core spec), and
 * for that case the stored type is restored to the old key's type
 * while userspace still sees 0x06 in the notification.
 */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the mgmt notification */
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	/* Link keys are always 128 bit (16 bytes) */
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
1038
1039int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1040{
1041 struct link_key *key;
1042
1043 key = hci_find_link_key(hdev, bdaddr);
1044 if (!key)
1045 return -ENOENT;
1046
1047 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1048
1049 list_del(&key->list);
1050 kfree(key);
1051
1052 return 0;
1053}
1054
/* Register HCI device.
 *
 * Picks the first free hciX id, initializes all per-device state
 * (locks, tasklets, queues, lists, timers), creates the device's
 * workqueue and sysfs/rfkill entries, and finally schedules the
 * initial power-on. Returns the assigned id, -EINVAL when mandatory
 * driver callbacks are missing, or -ENOMEM when the workqueue cannot
 * be created.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must supply open/close/destruct callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id; the list is kept sorted by id,
	 * so the first gap (or the end) yields the insertion point. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Conservative defaults until the controller reports its real
	 * capabilities during init */
	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill registration failure is non-fatal: the device simply
	 * has no kill switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Bring the device up via the workqueue; it will auto power off
	 * again unless userspace takes over (HCI_AUTO_OFF) */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1158
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global
 * list, closes it, frees pending reassembly buffers, notifies the
 * management interface (unless still initializing / in setup),
 * tears down rfkill, sysfs and the workqueue, clears the per-device
 * lists and drops the final reference. Always returns 0.
 */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb() tolerates NULL slots */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1201
/* Suspend HCI device: only notifies registered listeners; no device
 * state is changed here. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1209
/* Resume HCI device: only notifies registered listeners; no device
 * state is changed here. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1217
Marcel Holtmann76bca882009-11-18 00:40:39 +01001218/* Receive frame from HCI drivers */
1219int hci_recv_frame(struct sk_buff *skb)
1220{
1221 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1222 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1223 && !test_bit(HCI_INIT, &hdev->flags))) {
1224 kfree_skb(skb);
1225 return -ENXIO;
1226 }
1227
1228 /* Incomming skb */
1229 bt_cb(skb)->incoming = 1;
1230
1231 /* Time stamp */
1232 __net_timestamp(skb);
1233
1234 /* Queue frame for rx task */
1235 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001236 tasklet_schedule(&hdev->rx_task);
1237
Marcel Holtmann76bca882009-11-18 00:40:39 +01001238 return 0;
1239}
1240EXPORT_SYMBOL(hci_recv_frame);
1241
/* Incrementally reassemble one HCI packet from a byte stream.
 *
 * @hdev:     device the data belongs to
 * @type:     HCI packet type (ACL, SCO or event)
 * @data:     input bytes
 * @count:    number of input bytes available
 * @index:    which hdev->reassembly[] slot to use
 * @gfp_mask: allocation flags for a fresh skb
 *
 * State is kept in hdev->reassembly[index]; scb->expect tracks how
 * many bytes are still needed for the current header or payload.
 * Returns the number of input bytes NOT consumed (callers loop until
 * zero), or a negative error (-EILSEQ on bad type/index, -ENOMEM on
 * allocation failure or oversized payload).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate a buffer big enough for
		 * the largest packet of this type and expect its header
		 * first. The type range was validated above, so the switch
		 * always hits one of these cases. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current header/payload needs */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed-size header is complete, read the payload
		 * length out of it and sanity-check it against the buffer */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1350
Marcel Holtmannef222012007-07-11 06:42:04 +02001351int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1352{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301353 int rem = 0;
1354
Marcel Holtmannef222012007-07-11 06:42:04 +02001355 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1356 return -EILSEQ;
1357
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001358 while (count) {
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301359 rem = hci_reassembly(hdev, type, data, count,
1360 type - 1, GFP_ATOMIC);
1361 if (rem < 0)
1362 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001363
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301364 data += (count - rem);
1365 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001366 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001367
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301368 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001369}
1370EXPORT_SYMBOL(hci_recv_fragment);
1371
Suraj Sumangala99811512010-07-14 13:02:19 +05301372#define STREAM_REASSEMBLY 0
1373
1374int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1375{
1376 int type;
1377 int rem = 0;
1378
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001379 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301380 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1381
1382 if (!skb) {
1383 struct { char type; } *pkt;
1384
1385 /* Start of the frame */
1386 pkt = data;
1387 type = pkt->type;
1388
1389 data++;
1390 count--;
1391 } else
1392 type = bt_cb(skb)->pkt_type;
1393
1394 rem = hci_reassembly(hdev, type, data,
1395 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1396 if (rem < 0)
1397 return rem;
1398
1399 data += (count - rem);
1400 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001401 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301402
1403 return rem;
1404}
1405EXPORT_SYMBOL(hci_recv_stream_fragment);
1406
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407/* ---- Interface to upper protocols ---- */
1408
1409/* Register/Unregister protocols.
1410 * hci_task_lock is used to ensure that no tasks are running. */
1411int hci_register_proto(struct hci_proto *hp)
1412{
1413 int err = 0;
1414
1415 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1416
1417 if (hp->id >= HCI_MAX_PROTO)
1418 return -EINVAL;
1419
1420 write_lock_bh(&hci_task_lock);
1421
1422 if (!hci_proto[hp->id])
1423 hci_proto[hp->id] = hp;
1424 else
1425 err = -EEXIST;
1426
1427 write_unlock_bh(&hci_task_lock);
1428
1429 return err;
1430}
1431EXPORT_SYMBOL(hci_register_proto);
1432
1433int hci_unregister_proto(struct hci_proto *hp)
1434{
1435 int err = 0;
1436
1437 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1438
1439 if (hp->id >= HCI_MAX_PROTO)
1440 return -EINVAL;
1441
1442 write_lock_bh(&hci_task_lock);
1443
1444 if (hci_proto[hp->id])
1445 hci_proto[hp->id] = NULL;
1446 else
1447 err = -ENOENT;
1448
1449 write_unlock_bh(&hci_task_lock);
1450
1451 return err;
1452}
1453EXPORT_SYMBOL(hci_unregister_proto);
1454
/* Add a callback structure to the global HCI callback list under the
 * list lock. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1466
/* Remove a callback structure from the global HCI callback list under
 * the list lock. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1478
/* Hand one outgoing frame to the driver.
 *
 * If any promiscuous (raw) sockets are open, a timestamped copy is
 * delivered to them first. The skb is orphaned before the driver's
 * send callback so socket accounting is released. Returns the
 * driver's result, or -ENODEV when skb->dev was not set.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1502
/* Send HCI command.
 *
 * Builds a command packet (header + optional @plen bytes of @param),
 * queues it on cmd_q and kicks the command tasklet, which enforces
 * the controller's command flow control. During device init the
 * opcode is also recorded so the init state machine can track the
 * last command sent. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538
1539/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001540void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541{
1542 struct hci_command_hdr *hdr;
1543
1544 if (!hdev->sent_cmd)
1545 return NULL;
1546
1547 hdr = (void *) hdev->sent_cmd->data;
1548
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001549 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 return NULL;
1551
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001552 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
1554 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1555}
1556
/* Send ACL data */
/* Prepend an ACL header (handle + packet-boundary flags, little-endian
 * data length) in front of the current skb payload. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;   /* payload length before the header is pushed */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1569
/* Queue ACL data for transmission on @conn.
 *
 * For a non-fragmented skb the ACL header is added and the buffer is
 * queued directly. For a fragmented skb (frag_list set), the head
 * keeps the caller's @flags (normally ACL_START) while every fragment
 * is re-flagged as a continuation (ACL_CONT); all pieces are queued
 * under the queue lock so the sequence stays contiguous. Finally the
 * TX tasklet is kicked.
 */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as an
		 * independent ACL packet below */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* All subsequent fragments are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1618
/* Send SCO data */
/* Queue one SCO packet for transmission on @conn: build the SCO
 * header on the stack, push it in front of the payload and hand the
 * skb to the TX tasklet via the connection's data queue. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1641
1642/* ---- HCI TX task (outgoing data) ---- */
1643
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * unacknowledged packets (fairness), and compute its send quota:
 * the free controller buffer count divided evenly among all eligible
 * connections, at least 1. *quote is set to 0 and NULL returned when
 * nothing is sendable.
 * NOTE(review): min is an int initialized to ~0 (-1); the c->sent <
 * min comparison appears to rely on the usual arithmetic conversions
 * when sent is unsigned - left untouched, confirm before changing. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		/* Split the available buffer credits among all eligible
		 * connections; always grant at least one packet */
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1682
1683static inline void hci_acl_tx_to(struct hci_dev *hdev)
1684{
1685 struct hci_conn_hash *h = &hdev->conn_hash;
1686 struct list_head *p;
1687 struct hci_conn *c;
1688
1689 BT_ERR("%s ACL tx timeout", hdev->name);
1690
1691 /* Kill stalled connections */
1692 list_for_each(p, &h->list) {
1693 c = list_entry(p, struct hci_conn, list);
1694 if (c->type == ACL_LINK && c->sent) {
1695 BT_ERR("%s killing stalled ACL connection %s",
1696 hdev->name, batostr(&c->dst));
1697 hci_acl_disconn(c, 0x13);
1698 }
1699 }
1700}
1701
/* Drain queued ACL data: repeatedly pick the fairest connection via
 * hci_low_sent() and send up to its quota while controller buffer
 * credits (acl_cnt) last. Also fires the stall watchdog when no
 * credit has come back for 45 seconds. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* Leave sniff mode before pushing data */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1731
/* Schedule SCO */
/* Drain queued SCO data while controller SCO buffer credits last;
 * conn->sent wraps back to 0 at ~0 since SCO has no per-packet
 * completion accounting like ACL. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1752
/* Drain queued eSCO data. Mirrors hci_sched_sco() but for ESCO_LINK
 * connections; note that eSCO shares the sco_cnt buffer credits with
 * plain SCO. */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1772
/* TX tasklet: run all per-link schedulers (ACL, SCO, eSCO) and then
 * flush any raw (unknown type) packets straight to the driver. Runs
 * under the hci_task_lock read lock so protocol (un)registration is
 * excluded while transmitting. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
1796
/* ----- HCI RX task (incoming data processing) ----- */
1798
/* ACL data packet */
/* Deliver one received ACL packet: decode handle and boundary flags
 * from the header, look up the connection, exit sniff mode, and hand
 * the payload to L2CAP. The skb is consumed either by the protocol
 * or freed here (unknown handle / no L2CAP receiver). */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the connection handle and the
	 * packet-boundary/broadcast flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1838
/* SCO data packet */
/* Deliver one received SCO packet: decode the handle, look up the
 * connection and hand the payload to the SCO protocol. The skb is
 * consumed by the protocol or freed here on any miss. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1874
/* RX tasklet: drain the device's receive queue.
 *
 * Each frame is first copied to promiscuous sockets if any are open.
 * In raw mode all frames are dropped here (the raw socket copy above
 * is the only consumer); during init, data packets are dropped but
 * events still flow so the init state machine can progress. Everything
 * else is dispatched by packet type.
 */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
1929
/* Command tasklet: send the next queued HCI command when the
 * controller has a free command credit (cmd_cnt).
 *
 * If no credit has returned within one second of the last command,
 * assume the controller lost the Command Complete/Status event and
 * force a credit so the queue cannot stall forever. A clone of the
 * sent command is kept in hdev->sent_cmd so the event handler can
 * retrieve its parameters; if cloning fails the command is requeued
 * at the head and the tasklet rescheduled.
 */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		/* kfree_skb() tolerates a NULL sent_cmd */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}