blob: cc52e037440e9de5d9c29b43ec2601f0e0e0fff7 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay before an idle, auto-powered-on controller is switched back off.
 * Presumably milliseconds (used with msecs_to_jiffies elsewhere) -- TODO
 * confirm against the power_off work handler. */
#define AUTO_OFF_TIMEOUT 2000

/* Work handlers defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
68
/* ---- HCI notifications ---- */

/* Forward a device event (e.g. HCI_DEV_UP / HCI_DEV_DOWN) to the HCI
 * socket layer so listening sockets can be informed. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75
76/* ---- HCI requests ---- */
77
/* Complete a pending synchronous request: record @result and wake the
 * waiter sleeping in __hci_request(). During the HCI_INIT phase only the
 * completion of the last issued init command counts. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
94
95static void hci_req_cancel(struct hci_dev *hdev, int err)
96{
97 BT_DBG("%s err 0x%2.2x", hdev->name, err);
98
99 if (hdev->req_status == HCI_REQ_PEND) {
100 hdev->req_result = err;
101 hdev->req_status = HCI_REQ_CANCELED;
102 wake_up_interruptible(&hdev->req_wait_q);
103 }
104}
105
/* Execute request and wait for completion. */
/* Runs @req (which typically queues HCI commands) and then sleeps
 * interruptibly until hci_req_complete()/hci_req_cancel() wakes us or
 * @timeout jiffies elapse. Caller must hold the request lock (see
 * hci_request()). Returns 0 on success, a negative errno on failure,
 * -EINTR on signal, -ETIMEDOUT if no completion arrived. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue BEFORE issuing the request so a fast
	 * completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
148
149static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100150 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151{
152 int ret;
153
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200154 if (!test_bit(HCI_UP, &hdev->flags))
155 return -ENETDOWN;
156
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157 /* Serialize all requests */
158 hci_req_lock(hdev);
159 ret = __hci_request(hdev, req, opt, timeout);
160 hci_req_unlock(hdev);
161
162 return ret;
163}
164
/* Request callback: issue an HCI_Reset to the controller and flag that a
 * reset is in flight via HCI_RESET. @opt is unused (logged only). */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
173
/* Queue the BR/EDR controller init command sequence. The commands are
 * fired asynchronously; __hci_request() waits for the last completion.
 * The order below follows the conventional HCI bring-up sequence. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR uses packet-based flow control (cf. amp_init). */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that must not be reset) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots of 0.625 ms --
	 * presumably; verify against the Bluetooth core spec). */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Purge all stored link keys from the controller. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
225
/* Queue the (much shorter) init sequence for an AMP controller. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control (cf. bredr_init). */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
236
/* Request callback for device init: first flush any driver-supplied
 * "special" command skbs into the command queue, then run the
 * type-specific init sequence. @opt is unused (logged only). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	/* The loop drained the queue; this purge is belt-and-braces. */
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
270
/* Request callback for LE-specific init. @opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
278
Linus Torvalds1da177e2005-04-16 15:20:36 -0700279static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
280{
281 __u8 scan = opt;
282
283 BT_DBG("%s %x", hdev->name, scan);
284
285 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200286 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287}
288
289static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 auth = opt;
292
293 BT_DBG("%s %x", hdev->name, auth);
294
295 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200296 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297}
298
299static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 encrypt = opt;
302
303 BT_DBG("%s %x", hdev->name, encrypt);
304
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200305 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200309static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __le16 policy = cpu_to_le16(opt);
312
Marcel Holtmanna418b892008-11-30 12:17:28 +0100313 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200314
315 /* Default link policy */
316 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
317}
318
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900319/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320 * Device is held on return. */
321struct hci_dev *hci_dev_get(int index)
322{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200323 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324
325 BT_DBG("%d", index);
326
327 if (index < 0)
328 return NULL;
329
330 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200331 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
334 break;
335 }
336 }
337 read_unlock(&hci_dev_list_lock);
338 return hdev;
339}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200342
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200343bool hci_discovery_active(struct hci_dev *hdev)
344{
345 struct discovery_state *discov = &hdev->discovery;
346
Andre Guedes6fbe1952012-02-03 17:47:58 -0300347 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300348 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300349 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200350 return true;
351
Andre Guedes6fbe1952012-02-03 17:47:58 -0300352 default:
353 return false;
354 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200355}
356
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events. A transition to the same state is a no-op. */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hdev->discovery.type = 0;

		/* STARTING -> STOPPED means discovery never got going, so
		 * userspace was never told it started; don't signal stop. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
384
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385static void inquiry_cache_flush(struct hci_dev *hdev)
386{
Johan Hedberg30883512012-01-04 14:16:21 +0200387 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200388 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389
Johan Hedberg561aafb2012-01-04 13:31:59 +0200390 list_for_each_entry_safe(p, n, &cache->all, all) {
391 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200392 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200394
395 INIT_LIST_HEAD(&cache->unknown);
396 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200397 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398}
399
400struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
401{
Johan Hedberg30883512012-01-04 14:16:21 +0200402 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403 struct inquiry_entry *e;
404
405 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
406
Johan Hedberg561aafb2012-01-04 13:31:59 +0200407 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200409 return e;
410 }
411
412 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413}
414
Johan Hedberg561aafb2012-01-04 13:31:59 +0200415struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
416 bdaddr_t *bdaddr)
417{
Johan Hedberg30883512012-01-04 14:16:21 +0200418 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200419 struct inquiry_entry *e;
420
421 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
422
423 list_for_each_entry(e, &cache->unknown, list) {
424 if (!bacmp(&e->data.bdaddr, bdaddr))
425 return e;
426 }
427
428 return NULL;
429}
430
/* Look up an entry on the name-resolve list. Passing BDADDR_ANY acts as
 * a wildcard: the first entry whose name_state equals @state is
 * returned. Otherwise the entry matching @bdaddr is returned, or NULL. */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
449
/* Re-insert @ie into the resolve list keeping it sorted by signal
 * strength (strongest |rssi| first), so names are resolved for the
 * closest devices first. Entries already NAME_PENDING keep their slot. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last position before an entry with weaker signal. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
468
/* Insert or refresh an inquiry result in the cache. @name_known tells
 * whether the remote name came with the result (e.g. from EIR).
 * Returns true when the entry's name is (now) known, false when the
 * name is still unknown or allocation failed. */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* RSSI changed for an entry queued for name resolution:
		 * record it and re-sort the resolve list. */
		if (ie->name_state == NAME_NEEDED &&
				data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: promote the entry and take it off the
	 * unknown/resolve list it was on (NAME_PENDING stays untouched). */
	if (name_known && ie->name_state != NAME_KNOWN &&
			ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
518
519static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
520{
Johan Hedberg30883512012-01-04 14:16:21 +0200521 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522 struct inquiry_info *info = (struct inquiry_info *) buf;
523 struct inquiry_entry *e;
524 int copied = 0;
525
Johan Hedberg561aafb2012-01-04 13:31:59 +0200526 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200528
529 if (copied >= num)
530 break;
531
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 bacpy(&info->bdaddr, &data->bdaddr);
533 info->pscan_rep_mode = data->pscan_rep_mode;
534 info->pscan_period_mode = data->pscan_period_mode;
535 info->pscan_mode = data->pscan_mode;
536 memcpy(info->dev_class, data->dev_class, 3);
537 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200538
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200540 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541 }
542
543 BT_DBG("cache %p, copied %d", cache, copied);
544 return copied;
545}
546
547static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
548{
549 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
550 struct hci_cp_inquiry cp;
551
552 BT_DBG("%s", hdev->name);
553
554 if (test_bit(HCI_INQUIRY, &hdev->flags))
555 return;
556
557 /* Start Inquiry */
558 memcpy(&cp.lap, &ir->lap, 3);
559 cp.length = ir->length;
560 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200561 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562}
563
/* ioctl helper: run an inquiry (if the cache is stale, empty, or a
 * flush was requested) and copy the cached results back to userspace.
 * @arg points to a struct hci_inquiry_req followed by room for the
 * result array. Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the lock whether a fresh inquiry is needed. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28 s per the HCI spec -- presumably;
	 * 2000 ms per unit gives a generous wait margin. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
629
630/* ---- HCI ioctl helpers ---- */
631
/* ioctl helper: power on and initialize HCI device @dev. On success the
 * device is marked HCI_UP and a reference is held for its lifetime; on
 * init failure everything is torn back down. Returns 0 or a negative
 * errno (-ERFKILL, -EALREADY, -EIO, or an init error). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Raw devices skip the HCI init sequence below. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): an error from hci_init_req is overwritten
		 * here when the controller is LE-capable -- confirm this
		 * is intentional. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Hold a reference for as long as the device stays up;
		 * dropped in hci_dev_do_close(). */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
718
/* Power down @hdev: cancel pending work and timers, flush queues and
 * connections, optionally reset the controller, and notify mgmt.
 * Teardown order matters: works are flushed before queues are purged so
 * nothing re-queues behind us. Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* NOTE(review): reset-on-close happens only when the NO_RESET quirk
	 * IS set -- presumably because such controllers were not reset at
	 * open time; confirm against the quirk's documentation. */
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken by hci_dev_open(). */
	hci_dev_put(hdev);
	return 0;
}
803
804int hci_dev_close(__u16 dev)
805{
806 struct hci_dev *hdev;
807 int err;
808
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200809 hdev = hci_dev_get(dev);
810 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811 return -ENODEV;
812 err = hci_dev_do_close(hdev);
813 hci_dev_put(hdev);
814 return err;
815}
816
/* Soft-reset a running HCI device (HCIDEVRESET ioctl path).
 *
 * Drops queued RX/command traffic, flushes the inquiry cache and all
 * connections, resets the flow-control counters and, unless the device
 * is in raw mode, sends an HCI Reset to the controller.
 *
 * Returns 0 on success (also when the device is simply not up),
 * -ENODEV for an unknown index, or the __hci_request() error.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset if the device is down */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush discovery results and tear down existing connections;
	 * both require the device lock. */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Re-arm the command window and clear per-link-type credit counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
855
856int hci_dev_reset_stat(__u16 dev)
857{
858 struct hci_dev *hdev;
859 int ret = 0;
860
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200861 hdev = hci_dev_get(dev);
862 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863 return -ENODEV;
864
865 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
866
867 hci_dev_put(hdev);
868
869 return ret;
870}
871
/* Handle the per-device HCI ioctls (HCISETAUTH, HCISETENCRYPT, ...).
 *
 * @cmd: ioctl number selecting the operation
 * @arg: userspace pointer to a struct hci_dev_req carrying the device
 *       index and an operation-specific option word (dev_opt)
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -ENODEV for an
 * unknown index, -EOPNOTSUPP / -EINVAL for unsupported requests, or
 * the error from the underlying hci_request() call.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: low half = packet
		 * count, high half = MTU */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU, for the SCO link */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
946
/* Copy the list of registered HCI devices to userspace (HCIGETDEVLIST).
 *
 * @arg points to a struct hci_dev_list_req whose first __u16 holds the
 * maximum number of entries the caller can accept; the same buffer is
 * filled with (dev_id, flags) pairs on return.
 *
 * Side effect: walking the list clears HCI_AUTO_OFF (cancelling the
 * pending power-off work) and, for non-mgmt controlled devices, sets
 * HCI_PAIRABLE — userspace touching the dev list is taken as a sign a
 * legacy tool is managing the adapter.
 *
 * Returns 0, -EFAULT, -EINVAL (bad count) or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kzalloc below stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
993
/* Fill a struct hci_dev_info for one device (HCIGETDEVINFO ioctl).
 *
 * Same legacy-tool side effect as hci_get_dev_list(): clears
 * HCI_AUTO_OFF (synchronously cancelling the power-off work) and sets
 * HCI_PAIRABLE when the device is not mgmt-controlled.
 *
 * Returns 0, -EFAULT on a bad user pointer, or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	/* hdev->name is a fixed-size "hciN" string, so strcpy is bounded
	 * here — presumably di.name is at least as large; see struct
	 * definitions */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Encode bus type in the low nibble, device type in the high one */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1035
1036/* ---- Interface to HCI drivers ---- */
1037
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001038static int hci_rfkill_set_block(void *data, bool blocked)
1039{
1040 struct hci_dev *hdev = data;
1041
1042 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1043
1044 if (!blocked)
1045 return 0;
1046
1047 hci_dev_do_close(hdev);
1048
1049 return 0;
1050}
1051
1052static const struct rfkill_ops hci_rfkill_ops = {
1053 .set_block = hci_rfkill_set_block,
1054};
1055
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056/* Alloc HCI device */
1057struct hci_dev *hci_alloc_dev(void)
1058{
1059 struct hci_dev *hdev;
1060
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001061 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062 if (!hdev)
1063 return NULL;
1064
David Herrmann0ac7e702011-10-08 14:58:47 +02001065 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001066 skb_queue_head_init(&hdev->driver_init);
1067
1068 return hdev;
1069}
1070EXPORT_SYMBOL(hci_alloc_dev);
1071
1072/* Free HCI device */
1073void hci_free_dev(struct hci_dev *hdev)
1074{
1075 skb_queue_purge(&hdev->driver_init);
1076
Marcel Holtmanna91f2e32006-07-03 10:02:41 +02001077 /* will free via device release */
1078 put_device(&hdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079}
1080EXPORT_SYMBOL(hci_free_dev);
1081
/* Deferred power-on work item.
 *
 * Opens the device; on success, arms the auto-power-off timer if
 * HCI_AUTO_OFF is set, and notifies the management interface the first
 * time the device comes up (HCI_SETUP cleared exactly once).
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1098
1099static void hci_power_off(struct work_struct *work)
1100{
Johan Hedberg32435532011-11-07 22:16:04 +02001101 struct hci_dev *hdev = container_of(work, struct hci_dev,
1102 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001103
1104 BT_DBG("%s", hdev->name);
1105
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001106 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Johan Hedberg32435532011-11-07 22:16:04 +02001107
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001108 hci_dev_close(hdev->id);
1109}
1110
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001111static void hci_discov_off(struct work_struct *work)
1112{
1113 struct hci_dev *hdev;
1114 u8 scan = SCAN_PAGE;
1115
1116 hdev = container_of(work, struct hci_dev, discov_off.work);
1117
1118 BT_DBG("%s", hdev->name);
1119
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001120 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001121
1122 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1123
1124 hdev->discov_timeout = 0;
1125
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001126 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001127}
1128
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001129int hci_uuids_clear(struct hci_dev *hdev)
1130{
1131 struct list_head *p, *n;
1132
1133 list_for_each_safe(p, n, &hdev->uuids) {
1134 struct bt_uuid *uuid;
1135
1136 uuid = list_entry(p, struct bt_uuid, list);
1137
1138 list_del(p);
1139 kfree(uuid);
1140 }
1141
1142 return 0;
1143}
1144
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001145int hci_link_keys_clear(struct hci_dev *hdev)
1146{
1147 struct list_head *p, *n;
1148
1149 list_for_each_safe(p, n, &hdev->link_keys) {
1150 struct link_key *key;
1151
1152 key = list_entry(p, struct link_key, list);
1153
1154 list_del(p);
1155 kfree(key);
1156 }
1157
1158 return 0;
1159}
1160
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001161int hci_smp_ltks_clear(struct hci_dev *hdev)
1162{
1163 struct smp_ltk *k, *tmp;
1164
1165 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1166 list_del(&k->list);
1167 kfree(k);
1168 }
1169
1170 return 0;
1171}
1172
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001173struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1174{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001175 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001176
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001177 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001178 if (bacmp(bdaddr, &k->bdaddr) == 0)
1179 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001180
1181 return NULL;
1182}
1183
/* Decide whether a freshly created link key should be stored
 * persistently (returned to userspace for permanent storage) or kept
 * only for the lifetime of the connection.
 *
 * Returns 1 (persist) or 0 (discard after use). conn may be NULL
 * (security mode 3 pairing, where no connection context exists yet).
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1219
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001220struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001221{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001222 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001223
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001224 list_for_each_entry(k, &hdev->long_term_keys, list) {
1225 if (k->ediv != ediv ||
1226 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001227 continue;
1228
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001229 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001230 }
1231
1232 return NULL;
1233}
1234EXPORT_SYMBOL(hci_find_ltk);
1235
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001236struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1237 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001238{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001239 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001240
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001241 list_for_each_entry(k, &hdev->long_term_keys, list)
1242 if (addr_type == k->bdaddr_type &&
1243 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001244 return k;
1245
1246 return NULL;
1247}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001248EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001249
/* Store (or update) a BR/EDR link key for a remote device.
 *
 * @conn:    connection the key was created on, or NULL (mode 3 pairing)
 * @new_key: non-zero if this came from a Link Key Notification (as
 *           opposed to being loaded from storage)
 * @val:     the 16-byte key value
 *
 * If the key is deemed non-persistent it is still handed to mgmt but
 * removed from the in-kernel list again before returning.
 * Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if we have one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
				(!conn || conn->remote_auth == 0xff) &&
				old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are not kept in the kernel list */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1304
/* Store (or update) an SMP key (STK or LTK) for a remote device.
 *
 * Keys whose type is neither HCI_SMP_STK nor HCI_SMP_LTK are silently
 * ignored (returns 0). Only new LTKs are announced to the management
 * interface. Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address/type if present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1341
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001342int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1343{
1344 struct link_key *key;
1345
1346 key = hci_find_link_key(hdev, bdaddr);
1347 if (!key)
1348 return -ENOENT;
1349
1350 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1351
1352 list_del(&key->list);
1353 kfree(key);
1354
1355 return 0;
1356}
1357
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001358int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1359{
1360 struct smp_ltk *k, *tmp;
1361
1362 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1363 if (bacmp(bdaddr, &k->bdaddr))
1364 continue;
1365
1366 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1367
1368 list_del(&k->list);
1369 kfree(k);
1370 }
1371
1372 return 0;
1373}
1374
Ville Tervo6bd32322011-02-16 16:32:41 +02001375/* HCI command timer function */
1376static void hci_cmd_timer(unsigned long arg)
1377{
1378 struct hci_dev *hdev = (void *) arg;
1379
1380 BT_ERR("%s command tx timeout", hdev->name);
1381 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001382 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001383}
1384
Szymon Janc2763eda2011-03-22 13:12:22 +01001385struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1386 bdaddr_t *bdaddr)
1387{
1388 struct oob_data *data;
1389
1390 list_for_each_entry(data, &hdev->remote_oob_data, list)
1391 if (bacmp(bdaddr, &data->bdaddr) == 0)
1392 return data;
1393
1394 return NULL;
1395}
1396
1397int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1398{
1399 struct oob_data *data;
1400
1401 data = hci_find_remote_oob_data(hdev, bdaddr);
1402 if (!data)
1403 return -ENOENT;
1404
1405 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1406
1407 list_del(&data->list);
1408 kfree(data);
1409
1410 return 0;
1411}
1412
1413int hci_remote_oob_data_clear(struct hci_dev *hdev)
1414{
1415 struct oob_data *data, *n;
1416
1417 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1418 list_del(&data->list);
1419 kfree(data);
1420 }
1421
1422 return 0;
1423}
1424
1425int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1426 u8 *randomizer)
1427{
1428 struct oob_data *data;
1429
1430 data = hci_find_remote_oob_data(hdev, bdaddr);
1431
1432 if (!data) {
1433 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1434 if (!data)
1435 return -ENOMEM;
1436
1437 bacpy(&data->bdaddr, bdaddr);
1438 list_add(&data->list, &hdev->remote_oob_data);
1439 }
1440
1441 memcpy(data->hash, hash, sizeof(data->hash));
1442 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1443
1444 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1445
1446 return 0;
1447}
1448
Antti Julkub2a66aa2011-06-15 12:01:14 +03001449struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1450 bdaddr_t *bdaddr)
1451{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001452 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001453
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001454 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001455 if (bacmp(bdaddr, &b->bdaddr) == 0)
1456 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001457
1458 return NULL;
1459}
1460
1461int hci_blacklist_clear(struct hci_dev *hdev)
1462{
1463 struct list_head *p, *n;
1464
1465 list_for_each_safe(p, n, &hdev->blacklist) {
1466 struct bdaddr_list *b;
1467
1468 b = list_entry(p, struct bdaddr_list, list);
1469
1470 list_del(p);
1471 kfree(b);
1472 }
1473
1474 return 0;
1475}
1476
/* Add a remote address to the device blacklist.
 *
 * @type: address type — only forwarded to mgmt_device_blocked(); it is
 *        NOT stored in the list entry, so lookups match on address
 *        alone. NOTE(review): verify that is intended for LE dual
 *        address types.
 *
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already blacklisted,
 * -ENOMEM, or the result of mgmt_device_blocked().
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1497
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001498int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001499{
1500 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001501
Szymon Janc1ec918c2011-11-16 09:32:21 +01001502 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001503 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001504
1505 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001506 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001507 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001508
1509 list_del(&entry->list);
1510 kfree(entry);
1511
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001512 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001513}
1514
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001515static void hci_clear_adv_cache(struct work_struct *work)
Andre Guedes35815082011-05-26 16:23:53 -03001516{
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001517 struct hci_dev *hdev = container_of(work, struct hci_dev,
1518 adv_work.work);
Andre Guedes35815082011-05-26 16:23:53 -03001519
1520 hci_dev_lock(hdev);
1521
1522 hci_adv_entries_clear(hdev);
1523
1524 hci_dev_unlock(hdev);
1525}
1526
Andre Guedes76c86862011-05-26 16:23:50 -03001527int hci_adv_entries_clear(struct hci_dev *hdev)
1528{
1529 struct adv_entry *entry, *tmp;
1530
1531 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1532 list_del(&entry->list);
1533 kfree(entry);
1534 }
1535
1536 BT_DBG("%s adv cache cleared", hdev->name);
1537
1538 return 0;
1539}
1540
1541struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1542{
1543 struct adv_entry *entry;
1544
1545 list_for_each_entry(entry, &hdev->adv_entries, list)
1546 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1547 return entry;
1548
1549 return NULL;
1550}
1551
1552static inline int is_connectable_adv(u8 evt_type)
1553{
1554 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1555 return 1;
1556
1557 return 0;
1558}
1559
1560int hci_add_adv_entry(struct hci_dev *hdev,
1561 struct hci_ev_le_advertising_info *ev)
1562{
1563 struct adv_entry *entry;
1564
1565 if (!is_connectable_adv(ev->evt_type))
1566 return -EINVAL;
1567
1568 /* Only new entries should be added to adv_entries. So, if
1569 * bdaddr was found, don't add it. */
1570 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1571 return 0;
1572
Andre Guedes4777bfd2012-01-30 23:31:28 -03001573 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Andre Guedes76c86862011-05-26 16:23:50 -03001574 if (!entry)
1575 return -ENOMEM;
1576
1577 bacpy(&entry->bdaddr, &ev->bdaddr);
1578 entry->bdaddr_type = ev->bdaddr_type;
1579
1580 list_add(&entry->list, &hdev->adv_entries);
1581
1582 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1583 batostr(&entry->bdaddr), entry->bdaddr_type);
1584
1585 return 0;
1586}
1587
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001588static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1589{
1590 struct le_scan_params *param = (struct le_scan_params *) opt;
1591 struct hci_cp_le_set_scan_param cp;
1592
1593 memset(&cp, 0, sizeof(cp));
1594 cp.type = param->type;
1595 cp.interval = cpu_to_le16(param->interval);
1596 cp.window = cpu_to_le16(param->window);
1597
1598 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1599}
1600
1601static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1602{
1603 struct hci_cp_le_set_scan_enable cp;
1604
1605 memset(&cp, 0, sizeof(cp));
1606 cp.enable = 1;
1607
1608 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1609}
1610
/* Synchronously start an LE scan: program the scan parameters, enable
 * scanning, then arm the delayed work that will disable it again after
 * @timeout milliseconds.
 *
 * Each HCI exchange is given a fixed 3 second budget. Returns
 * -EINPROGRESS if a scan is already running, a request error, or 0.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	/* Both requests run under the request lock so they cannot
	 * interleave with other synchronous HCI requests */
	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Schedule automatic disable once the scan window elapses */
	schedule_delayed_work(&hdev->le_scan_disable,
				msecs_to_jiffies(timeout));

	return 0;
}
1644
1645static void le_scan_disable_work(struct work_struct *work)
1646{
1647 struct hci_dev *hdev = container_of(work, struct hci_dev,
1648 le_scan_disable.work);
1649 struct hci_cp_le_set_scan_enable cp;
1650
1651 BT_DBG("%s", hdev->name);
1652
1653 memset(&cp, 0, sizeof(cp));
1654
1655 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1656}
1657
Andre Guedes28b75a82012-02-03 17:48:00 -03001658static void le_scan_work(struct work_struct *work)
1659{
1660 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1661 struct le_scan_params *param = &hdev->le_scan_params;
1662
1663 BT_DBG("%s", hdev->name);
1664
1665 hci_do_le_scan(hdev, param->type, param->interval,
1666 param->window, param->timeout);
1667}
1668
/* Asynchronously start an LE scan.
 *
 * Stages the scan parameters in hdev->le_scan_params and queues
 * le_scan_work on system_long_wq, which performs the blocking HCI
 * requests. Returns -EINPROGRESS if the scan work is already pending
 * or running, otherwise 0.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
			int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq: the work blocks for seconds on HCI requests */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1688
/* Register HCI device.
 *
 * Allocates the first free device id (AMP controllers never get id 0,
 * so the id doubles as the AMP controller ID), initializes all per-device
 * state, creates the per-device workqueue and sysfs entries, and schedules
 * the initial power-on. Returns the assigned id on success or a negative
 * error code; on error the device is removed from the global list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must at least provide open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Keep the list sorted by id: insert after the last match */
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default link policy / packet types */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* rx/cmd/tx processing runs from the per-device workqueue */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Ordered, high-priority queue: HCI processing must not be starved
	 * and may be needed for memory reclaim (e.g. network block dev) */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is not fatal; run without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Device starts in setup phase and will be powered off again
	 * by the auto-off logic if userspace does not claim it */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1820
/* Unregister HCI device.
 *
 * Reverse of hci_register_dev(): unlink from the global list, close the
 * device, notify mgmt/stack users, tear down sysfs/rfkill/workqueue and
 * free all cached per-device data. Drops the reference taken at
 * registration time, which may free the hdev.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce the removal to mgmt if the device had finished
	 * its setup phase (otherwise mgmt never saw it) */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Make sure no advertising-cache expiry runs after this point */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all cached remote-device data */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference held since hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1873
/* Suspend HCI device: notify listeners only, no state is torn down. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1881
/* Resume HCI device: notify listeners only, no state is restored here. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1889
/* Receive frame from HCI drivers.
 *
 * Called by transport drivers with a complete HCI frame; the hdev is
 * carried in skb->dev. Frames for unknown devices, or for devices that
 * are neither up nor initializing, are dropped with -ENXIO. Accepted
 * frames are timestamped and queued for the rx work item.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer the actual processing to hci_rx_work */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1912
/* Reassemble HCI packet fragments into complete frames.
 *
 * @hdev:  device the bytes arrived on
 * @type:  HCI packet type (HCI_ACLDATA_PKT, HCI_SCODATA_PKT or
 *	   HCI_EVENT_PKT)
 * @data:  fragment bytes
 * @count: number of bytes available at @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * Consumes bytes from @data until either they run out or a frame
 * completes; a completed frame is handed to hci_recv_frame() and the
 * slot is cleared. Returns the number of unconsumed bytes, or a
 * negative error (-EILSEQ for a bad type/index, -ENOMEM on allocation
 * failure or when the announced payload would overflow the buffer).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No frame in progress: allocate a buffer sized for the
		 * largest frame of this type and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many more bytes finish the
		 * current header or payload */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once a full header has been copied, read the payload
		 * length from it and sanity-check it against the room
		 * left in the buffer */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2021
Marcel Holtmannef222012007-07-11 06:42:04 +02002022int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2023{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302024 int rem = 0;
2025
Marcel Holtmannef222012007-07-11 06:42:04 +02002026 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2027 return -EILSEQ;
2028
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002029 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002030 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302031 if (rem < 0)
2032 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002033
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302034 data += (count - rem);
2035 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002036 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002037
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302038 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002039}
2040EXPORT_SYMBOL(hci_recv_fragment);
2041
/* Slot used for byte-stream transports, where all packet types share
 * one reassembly buffer */
#define STREAM_REASSEMBLY 0

/* Feed raw bytes from a stream transport (e.g. UART) into the
 * reassembler. When no frame is in progress, the first byte of the
 * stream is the HCI packet type indicator; otherwise the type of the
 * frame in progress is reused. Returns unconsumed byte count or a
 * negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2076
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077/* ---- Interface to upper protocols ---- */
2078
/* Add an upper-protocol callback structure to the global HCI callback
 * list, protected by hci_cb_list_lock. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2090
/* Remove a previously registered callback structure from the global
 * HCI callback list. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2102
/* Hand one outgoing frame to the transport driver.
 *
 * The target hdev is carried in skb->dev. A copy is delivered to the
 * monitor channel always, and to raw sockets when any are listening
 * (promisc). Ownership of the skb passes to the driver's send hook.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2130
/* Send HCI command.
 *
 * Builds a command packet (header with @opcode and @plen, followed by
 * @plen bytes copied from @param) and queues it on cmd_q for the cmd
 * work item to transmit. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header: little-endian opcode plus parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command sent during init so the init
	 * sequence can resume after its completion */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166
2167/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002168void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169{
2170 struct hci_command_hdr *hdr;
2171
2172 if (!hdev->sent_cmd)
2173 return NULL;
2174
2175 hdr = (void *) hdev->sent_cmd->data;
2176
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002177 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 return NULL;
2179
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002180 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
2182 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2183}
2184
/* Send ACL data */
/* Prepend an ACL header (handle+flags packed into 16 bits, then the
 * current payload length) to an outgoing ACL skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2197
/* Queue an outgoing ACL skb (and any fragments hanging off its
 * frag_list) on the given channel queue. Fragments after the first are
 * re-labelled as continuation packets (ACL_CONT) and each gets its own
 * ACL header; the whole chain is queued atomically under the queue
 * lock so fragments of different packets cannot interleave. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; the fragments are queued individually */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All fragments after the first are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2238
/* Send ACL data on a channel: add the ACL header, queue the packet
 * (with any fragments) on the channel's data queue and kick the tx
 * work item to schedule transmission. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2255
/* Send SCO data */
/* Prepend the SCO header (handle + payload length), queue the packet
 * on the connection's data queue and kick the tx work item. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2278
2279/* ---- HCI TX task (outgoing data) ---- */
2280
/* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets (fair scheduling), and compute its quota:
 * the controller's free buffer count divided by the number of eligible
 * connections, but at least 1. Returns NULL with *quote = 0 when no
 * connection is ready. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the least outstanding data */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that matches the link type; LE
		 * falls back to the ACL pool when no dedicated LE
		 * buffers exist */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2340
/* ACL tx timeout handler: the controller stopped acknowledging
 * packets, so disconnect every connection of the given link type that
 * still has packets in flight. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: Remote User Terminated Connection */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2361
/* Channel-aware scheduler: pick the channel to service next among all
 * connections of the given link type.
 *
 * Selection is by highest head-of-queue skb priority first; among
 * channels at that priority, the one whose connection has the fewest
 * packets in flight wins. *quote is set to the per-round packet budget
 * (free buffers / channels at the winning priority, minimum 1).
 * Returns NULL when nothing is ready.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Ignore channels below the best priority seen */
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, least-sent wins */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Budget: free controller buffers for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2440
/* Anti-starvation pass run after a tx round: for every channel of the
 * given link type that was NOT serviced this round (chan->sent == 0)
 * but still has queued data, promote its head skb to HCI_PRIO_MAX - 1
 * so it will win the next scheduling round. Channels that were
 * serviced get their per-round counter reset instead. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Serviced this round: just reset the counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2490
/* Number of controller data blocks an ACL packet occupies under
 * block-based flow control (payload only, rounded up). */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2496
/* If the controller has run out of buffers (@cnt == 0) and has not
 * completed any ACL packet for longer than the tx timeout, treat the
 * link as stalled and disconnect the offenders. Skipped for raw-mode
 * devices. */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507
/* Transmit queued ACL data under packet-based flow control: repeatedly
 * ask the channel scheduler for the next channel and send up to its
 * quota of packets, stopping early if a lower-priority skb appears at
 * the head of the queue. Afterwards, run the priority-promotion pass
 * if anything was sent. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Leave sniff mode before sending, if requested */
			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2545
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002546static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2547{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002548 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002549 struct hci_chan *chan;
2550 struct sk_buff *skb;
2551 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002552
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002553 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002554
2555 while (hdev->block_cnt > 0 &&
2556 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2557 u32 priority = (skb_peek(&chan->data_q))->priority;
2558 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2559 int blocks;
2560
2561 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2562 skb->len, skb->priority);
2563
2564 /* Stop if priority has changed */
2565 if (skb->priority < priority)
2566 break;
2567
2568 skb = skb_dequeue(&chan->data_q);
2569
2570 blocks = __get_blocks(hdev, skb);
2571 if (blocks > hdev->block_cnt)
2572 return;
2573
2574 hci_conn_enter_active_mode(chan->conn,
2575 bt_cb(skb)->force_active);
2576
2577 hci_send_frame(skb);
2578 hdev->acl_last_tx = jiffies;
2579
2580 hdev->block_cnt -= blocks;
2581 quote -= blocks;
2582
2583 chan->sent += blocks;
2584 chan->conn->sent += blocks;
2585 }
2586 }
2587
2588 if (cnt != hdev->block_cnt)
2589 hci_prio_recalculate(hdev, ACL_LINK);
2590}
2591
2592static inline void hci_sched_acl(struct hci_dev *hdev)
2593{
2594 BT_DBG("%s", hdev->name);
2595
2596 if (!hci_conn_num(hdev, ACL_LINK))
2597 return;
2598
2599 switch (hdev->flow_ctl_mode) {
2600 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2601 hci_sched_acl_pkt(hdev);
2602 break;
2603
2604 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2605 hci_sched_acl_blk(hdev);
2606 break;
2607 }
2608}
2609
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610/* Schedule SCO */
2611static inline void hci_sched_sco(struct hci_dev *hdev)
2612{
2613 struct hci_conn *conn;
2614 struct sk_buff *skb;
2615 int quote;
2616
2617 BT_DBG("%s", hdev->name);
2618
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002619 if (!hci_conn_num(hdev, SCO_LINK))
2620 return;
2621
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2623 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2624 BT_DBG("skb %p len %d", skb, skb->len);
2625 hci_send_frame(skb);
2626
2627 conn->sent++;
2628 if (conn->sent == ~0)
2629 conn->sent = 0;
2630 }
2631 }
2632}
2633
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002634static inline void hci_sched_esco(struct hci_dev *hdev)
2635{
2636 struct hci_conn *conn;
2637 struct sk_buff *skb;
2638 int quote;
2639
2640 BT_DBG("%s", hdev->name);
2641
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002642 if (!hci_conn_num(hdev, ESCO_LINK))
2643 return;
2644
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002645 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2646 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2647 BT_DBG("skb %p len %d", skb, skb->len);
2648 hci_send_frame(skb);
2649
2650 conn->sent++;
2651 if (conn->sent == ~0)
2652 conn->sent = 0;
2653 }
2654 }
2655}
2656
/* LE scheduler. On controllers without a dedicated LE buffer pool
 * (le_pkts == 0) LE traffic consumes ACL credits instead.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		/* NOTE(review): hard-coded HZ * 45 here, while the ACL path
		 * in __check_timeout() uses msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)
		 * -- presumably the same value; consider unifying. */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Borrow from the ACL pool when there are no dedicated LE buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining credits to whichever pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2707
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002708static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002710 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 struct sk_buff *skb;
2712
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002713 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2714 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715
2716 /* Schedule queues and send stuff to HCI driver */
2717
2718 hci_sched_acl(hdev);
2719
2720 hci_sched_sco(hdev);
2721
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002722 hci_sched_esco(hdev);
2723
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002724 hci_sched_le(hdev);
2725
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 /* Send next queued raw (unknown type) packet */
2727 while ((skb = skb_dequeue(&hdev->raw_q)))
2728 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729}
2730
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002731/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732
2733/* ACL data packet */
2734static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2735{
2736 struct hci_acl_hdr *hdr = (void *) skb->data;
2737 struct hci_conn *conn;
2738 __u16 handle, flags;
2739
2740 skb_pull(skb, HCI_ACL_HDR_SIZE);
2741
2742 handle = __le16_to_cpu(hdr->handle);
2743 flags = hci_flags(handle);
2744 handle = hci_handle(handle);
2745
2746 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2747
2748 hdev->stat.acl_rx++;
2749
2750 hci_dev_lock(hdev);
2751 conn = hci_conn_hash_lookup_handle(hdev, handle);
2752 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002753
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002755 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002756
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002758 l2cap_recv_acldata(conn, skb, flags);
2759 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002761 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762 hdev->name, handle);
2763 }
2764
2765 kfree_skb(skb);
2766}
2767
2768/* SCO data packet */
2769static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2770{
2771 struct hci_sco_hdr *hdr = (void *) skb->data;
2772 struct hci_conn *conn;
2773 __u16 handle;
2774
2775 skb_pull(skb, HCI_SCO_HDR_SIZE);
2776
2777 handle = __le16_to_cpu(hdr->handle);
2778
2779 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2780
2781 hdev->stat.sco_rx++;
2782
2783 hci_dev_lock(hdev);
2784 conn = hci_conn_hash_lookup_handle(hdev, handle);
2785 hci_dev_unlock(hdev);
2786
2787 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002789 sco_recv_scodata(conn, skb);
2790 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002792 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 hdev->name, handle);
2794 }
2795
2796 kfree_skb(skb);
2797}
2798
/* RX work handler: drain hdev->rx_q, mirroring each frame to the monitor
 * (and, in promiscuous mode, to raw sockets) before dispatching it by
 * packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: userspace processes frames itself, drop here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state
			 * (events still go through for init commands) */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop */
			kfree_skb(skb);
			break;
		}
	}
}
2853
/* CMD work handler: send the next queued HCI command once the controller
 * has a command credit (cmd_cnt > 0).
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone of the outgoing command -- presumably so the
		 * matching command complete/status event can be paired with
		 * it later; the pairing code is outside this file view */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset, stop the command timer instead of
			 * re-arming it */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry
			 * from the workqueue */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002884
2885int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2886{
2887 /* General inquiry access code (GIAC) */
2888 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2889 struct hci_cp_inquiry cp;
2890
2891 BT_DBG("%s", hdev->name);
2892
2893 if (test_bit(HCI_INQUIRY, &hdev->flags))
2894 return -EINPROGRESS;
2895
Johan Hedberg46632622012-01-02 16:06:08 +02002896 inquiry_cache_flush(hdev);
2897
Andre Guedes2519a1f2011-11-07 11:45:24 -03002898 memset(&cp, 0, sizeof(cp));
2899 memcpy(&cp.lap, lap, sizeof(cp.lap));
2900 cp.length = length;
2901
2902 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2903}
Andre Guedes023d50492011-11-04 14:16:52 -03002904
2905int hci_cancel_inquiry(struct hci_dev *hdev)
2906{
2907 BT_DBG("%s", hdev->name);
2908
2909 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2910 return -EPERM;
2911
2912 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2913}