blob: db484a8e73649cedd60a952ab42af54a83a7f10d [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay before an auto-powered-on adapter is powered back off
 * (consumed elsewhere in this file — units per msecs_to_jiffies use,
 * TODO confirm). */
#define AUTO_OFF_TIMEOUT	2000

/* Forward declarations for the per-device work handlers */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070069/* ---- HCI notifications ---- */
70
/* Forward a device state event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75
76/* ---- HCI requests ---- */
77
Johan Hedberg23bb5762010-12-21 23:01:27 +020078void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070079{
Johan Hedberg23bb5762010-12-21 23:01:27 +020080 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
Johan Hedberga5040ef2011-01-10 13:28:59 +020082 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
84 */
85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +020086 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -070087
88 if (hdev->req_status == HCI_REQ_PEND) {
89 hdev->req_result = result;
90 hdev->req_status = HCI_REQ_DONE;
91 wake_up_interruptible(&hdev->req_wait_q);
92 }
93}
94
95static void hci_req_cancel(struct hci_dev *hdev, int err)
96{
97 BT_DBG("%s err 0x%2.2x", hdev->name, err);
98
99 if (hdev->req_status == HCI_REQ_PEND) {
100 hdev->req_result = err;
101 hdev->req_status = HCI_REQ_CANCELED;
102 wake_up_interruptible(&hdev->req_wait_q);
103 }
104}
105
106/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900107static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100108 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109{
110 DECLARE_WAITQUEUE(wait, current);
111 int err = 0;
112
113 BT_DBG("%s start", hdev->name);
114
115 hdev->req_status = HCI_REQ_PEND;
116
117 add_wait_queue(&hdev->req_wait_q, &wait);
118 set_current_state(TASK_INTERRUPTIBLE);
119
120 req(hdev, opt);
121 schedule_timeout(timeout);
122
123 remove_wait_queue(&hdev->req_wait_q, &wait);
124
125 if (signal_pending(current))
126 return -EINTR;
127
128 switch (hdev->req_status) {
129 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700130 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700131 break;
132
133 case HCI_REQ_CANCELED:
134 err = -hdev->req_result;
135 break;
136
137 default:
138 err = -ETIMEDOUT;
139 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700140 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141
Johan Hedberga5040ef2011-01-10 13:28:59 +0200142 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143
144 BT_DBG("%s end: err %d", hdev->name, err);
145
146 return err;
147}
148
149static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100150 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151{
152 int ret;
153
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200154 if (!test_bit(HCI_UP, &hdev->flags))
155 return -ENETDOWN;
156
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157 /* Serialize all requests */
158 hci_req_lock(hdev);
159 ret = __hci_request(hdev, req, opt, timeout);
160 hci_req_unlock(hdev);
161
162 return ret;
163}
164
165static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
166{
167 BT_DBG("%s %ld", hdev->name, opt);
168
169 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300170 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200171 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700172}
173
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200174static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200176 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800177 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200178 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200180 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 /* Mandatory initialization */
183
184 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300185 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300188 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189
190 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200191 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200193 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200194 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200195
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200197 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200200 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
201
202 /* Read Class of Device */
203 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
204
205 /* Read Local Name */
206 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207
208 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200209 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210
211 /* Optional initialization */
212
213 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200214 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200215 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700218 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200219 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200220
221 bacpy(&cp.bdaddr, BDADDR_ANY);
222 cp.delete_all = 1;
223 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224}
225
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200226static void amp_init(struct hci_dev *hdev)
227{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200228 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
229
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200230 /* Reset */
231 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
232
233 /* Read Local Version */
234 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
235}
236
237static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
238{
239 struct sk_buff *skb;
240
241 BT_DBG("%s %ld", hdev->name, opt);
242
243 /* Driver initialization */
244
245 /* Special commands */
246 while ((skb = skb_dequeue(&hdev->driver_init))) {
247 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
248 skb->dev = (void *) hdev;
249
250 skb_queue_tail(&hdev->cmd_q, skb);
251 queue_work(hdev->workqueue, &hdev->cmd_work);
252 }
253 skb_queue_purge(&hdev->driver_init);
254
255 switch (hdev->dev_type) {
256 case HCI_BREDR:
257 bredr_init(hdev);
258 break;
259
260 case HCI_AMP:
261 amp_init(hdev);
262 break;
263
264 default:
265 BT_ERR("Unknown device type %d", hdev->dev_type);
266 break;
267 }
268
269}
270
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300271static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
272{
273 BT_DBG("%s", hdev->name);
274
275 /* Read LE buffer size */
276 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
277}
278
Linus Torvalds1da177e2005-04-16 15:20:36 -0700279static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
280{
281 __u8 scan = opt;
282
283 BT_DBG("%s %x", hdev->name, scan);
284
285 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200286 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287}
288
289static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 auth = opt;
292
293 BT_DBG("%s %x", hdev->name, auth);
294
295 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200296 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297}
298
299static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 encrypt = opt;
302
303 BT_DBG("%s %x", hdev->name, encrypt);
304
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200305 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200309static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __le16 policy = cpu_to_le16(opt);
312
Marcel Holtmanna418b892008-11-30 12:17:28 +0100313 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200314
315 /* Default link policy */
316 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
317}
318
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900319/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320 * Device is held on return. */
321struct hci_dev *hci_dev_get(int index)
322{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200323 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324
325 BT_DBG("%d", index);
326
327 if (index < 0)
328 return NULL;
329
330 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200331 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
334 break;
335 }
336 }
337 read_unlock(&hci_dev_list_lock);
338 return hdev;
339}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200342
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200343bool hci_discovery_active(struct hci_dev *hdev)
344{
345 struct discovery_state *discov = &hdev->discovery;
346
Andre Guedes6fbe1952012-02-03 17:47:58 -0300347 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300348 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300349 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200350 return true;
351
Andre Guedes6fbe1952012-02-03 17:47:58 -0300352 default:
353 return false;
354 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200355}
356
Johan Hedbergff9ef572012-01-04 14:23:45 +0200357void hci_discovery_set_state(struct hci_dev *hdev, int state)
358{
359 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
360
361 if (hdev->discovery.state == state)
362 return;
363
364 switch (state) {
365 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300366 if (hdev->discovery.state != DISCOVERY_STARTING)
367 mgmt_discovering(hdev, 0);
Johan Hedbergf963e8e2012-02-20 23:30:44 +0200368 hdev->discovery.type = 0;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200369 break;
370 case DISCOVERY_STARTING:
371 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300372 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200373 mgmt_discovering(hdev, 1);
374 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200375 case DISCOVERY_RESOLVING:
376 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200377 case DISCOVERY_STOPPING:
378 break;
379 }
380
381 hdev->discovery.state = state;
382}
383
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384static void inquiry_cache_flush(struct hci_dev *hdev)
385{
Johan Hedberg30883512012-01-04 14:16:21 +0200386 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200387 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388
Johan Hedberg561aafb2012-01-04 13:31:59 +0200389 list_for_each_entry_safe(p, n, &cache->all, all) {
390 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200391 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200393
394 INIT_LIST_HEAD(&cache->unknown);
395 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200396 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397}
398
399struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
400{
Johan Hedberg30883512012-01-04 14:16:21 +0200401 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 struct inquiry_entry *e;
403
404 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
405
Johan Hedberg561aafb2012-01-04 13:31:59 +0200406 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200408 return e;
409 }
410
411 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412}
413
Johan Hedberg561aafb2012-01-04 13:31:59 +0200414struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
415 bdaddr_t *bdaddr)
416{
Johan Hedberg30883512012-01-04 14:16:21 +0200417 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200418 struct inquiry_entry *e;
419
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
421
422 list_for_each_entry(e, &cache->unknown, list) {
423 if (!bacmp(&e->data.bdaddr, bdaddr))
424 return e;
425 }
426
427 return NULL;
428}
429
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200430struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
431 bdaddr_t *bdaddr,
432 int state)
433{
434 struct discovery_state *cache = &hdev->discovery;
435 struct inquiry_entry *e;
436
437 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
438
439 list_for_each_entry(e, &cache->resolve, list) {
440 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
441 return e;
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447}
448
Johan Hedberga3d4e202012-01-09 00:53:02 +0200449void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
450 struct inquiry_entry *ie)
451{
452 struct discovery_state *cache = &hdev->discovery;
453 struct list_head *pos = &cache->resolve;
454 struct inquiry_entry *p;
455
456 list_del(&ie->list);
457
458 list_for_each_entry(p, &cache->resolve, list) {
459 if (p->name_state != NAME_PENDING &&
460 abs(p->data.rssi) >= abs(ie->data.rssi))
461 break;
462 pos = &p->list;
463 }
464
465 list_add(&ie->list, pos);
466}
467
Johan Hedberg31754052012-01-04 13:39:52 +0200468bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200469 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700470{
Johan Hedberg30883512012-01-04 14:16:21 +0200471 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200472 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473
474 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
475
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200476 if (ssp)
477 *ssp = data->ssp_mode;
478
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200479 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200480 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200481 if (ie->data.ssp_mode && ssp)
482 *ssp = true;
483
Johan Hedberga3d4e202012-01-09 00:53:02 +0200484 if (ie->name_state == NAME_NEEDED &&
485 data->rssi != ie->data.rssi) {
486 ie->data.rssi = data->rssi;
487 hci_inquiry_cache_update_resolve(hdev, ie);
488 }
489
Johan Hedberg561aafb2012-01-04 13:31:59 +0200490 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200491 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200492
Johan Hedberg561aafb2012-01-04 13:31:59 +0200493 /* Entry not in the cache. Add new one. */
494 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
495 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200496 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200497
498 list_add(&ie->all, &cache->all);
499
500 if (name_known) {
501 ie->name_state = NAME_KNOWN;
502 } else {
503 ie->name_state = NAME_NOT_KNOWN;
504 list_add(&ie->list, &cache->unknown);
505 }
506
507update:
508 if (name_known && ie->name_state != NAME_KNOWN &&
509 ie->name_state != NAME_PENDING) {
510 ie->name_state = NAME_KNOWN;
511 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512 }
513
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200514 memcpy(&ie->data, data, sizeof(*data));
515 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200517
518 if (ie->name_state == NAME_NOT_KNOWN)
519 return false;
520
521 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522}
523
524static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
525{
Johan Hedberg30883512012-01-04 14:16:21 +0200526 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 struct inquiry_info *info = (struct inquiry_info *) buf;
528 struct inquiry_entry *e;
529 int copied = 0;
530
Johan Hedberg561aafb2012-01-04 13:31:59 +0200531 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200533
534 if (copied >= num)
535 break;
536
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 bacpy(&info->bdaddr, &data->bdaddr);
538 info->pscan_rep_mode = data->pscan_rep_mode;
539 info->pscan_period_mode = data->pscan_period_mode;
540 info->pscan_mode = data->pscan_mode;
541 memcpy(info->dev_class, data->dev_class, 3);
542 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200543
Linus Torvalds1da177e2005-04-16 15:20:36 -0700544 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200545 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 }
547
548 BT_DBG("cache %p, copied %d", cache, copied);
549 return copied;
550}
551
552static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
553{
554 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
555 struct hci_cp_inquiry cp;
556
557 BT_DBG("%s", hdev->name);
558
559 if (test_bit(HCI_INQUIRY, &hdev->flags))
560 return;
561
562 /* Start Inquiry */
563 memcpy(&cp.lap, &ir->lap, 3);
564 cp.length = ir->length;
565 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200566 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567}
568
569int hci_inquiry(void __user *arg)
570{
571 __u8 __user *ptr = arg;
572 struct hci_inquiry_req ir;
573 struct hci_dev *hdev;
574 int err = 0, do_inquiry = 0, max_rsp;
575 long timeo;
576 __u8 *buf;
577
578 if (copy_from_user(&ir, ptr, sizeof(ir)))
579 return -EFAULT;
580
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200581 hdev = hci_dev_get(ir.dev_id);
582 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700583 return -ENODEV;
584
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300585 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900586 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200587 inquiry_cache_empty(hdev) ||
588 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589 inquiry_cache_flush(hdev);
590 do_inquiry = 1;
591 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300592 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593
Marcel Holtmann04837f62006-07-03 10:02:33 +0200594 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200595
596 if (do_inquiry) {
597 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
598 if (err < 0)
599 goto done;
600 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601
602 /* for unlimited number of responses we will use buffer with 255 entries */
603 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
604
605 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
606 * copy it to the user space.
607 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100608 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200609 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610 err = -ENOMEM;
611 goto done;
612 }
613
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300614 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300616 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617
618 BT_DBG("num_rsp %d", ir.num_rsp);
619
620 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
621 ptr += sizeof(ir);
622 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
623 ir.num_rsp))
624 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900625 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626 err = -EFAULT;
627
628 kfree(buf);
629
630done:
631 hci_dev_put(hdev);
632 return err;
633}
634
635/* ---- HCI ioctl helpers ---- */
636
637int hci_dev_open(__u16 dev)
638{
639 struct hci_dev *hdev;
640 int ret = 0;
641
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200642 hdev = hci_dev_get(dev);
643 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700644 return -ENODEV;
645
646 BT_DBG("%s %p", hdev->name, hdev);
647
648 hci_req_lock(hdev);
649
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200650 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
651 ret = -ERFKILL;
652 goto done;
653 }
654
Linus Torvalds1da177e2005-04-16 15:20:36 -0700655 if (test_bit(HCI_UP, &hdev->flags)) {
656 ret = -EALREADY;
657 goto done;
658 }
659
660 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
661 set_bit(HCI_RAW, &hdev->flags);
662
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200663 /* Treat all non BR/EDR controllers as raw devices if
664 enable_hs is not set */
665 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100666 set_bit(HCI_RAW, &hdev->flags);
667
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668 if (hdev->open(hdev)) {
669 ret = -EIO;
670 goto done;
671 }
672
673 if (!test_bit(HCI_RAW, &hdev->flags)) {
674 atomic_set(&hdev->cmd_cnt, 1);
675 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200676 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677
Marcel Holtmann04837f62006-07-03 10:02:33 +0200678 ret = __hci_request(hdev, hci_init_req, 0,
679 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680
Andre Guedeseead27d2011-06-30 19:20:55 -0300681 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300682 ret = __hci_request(hdev, hci_le_init_req, 0,
683 msecs_to_jiffies(HCI_INIT_TIMEOUT));
684
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685 clear_bit(HCI_INIT, &hdev->flags);
686 }
687
688 if (!ret) {
689 hci_dev_hold(hdev);
690 set_bit(HCI_UP, &hdev->flags);
691 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200692 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300693 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200694 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300695 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200696 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900697 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200699 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200700 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400701 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702
703 skb_queue_purge(&hdev->cmd_q);
704 skb_queue_purge(&hdev->rx_q);
705
706 if (hdev->flush)
707 hdev->flush(hdev);
708
709 if (hdev->sent_cmd) {
710 kfree_skb(hdev->sent_cmd);
711 hdev->sent_cmd = NULL;
712 }
713
714 hdev->close(hdev);
715 hdev->flags = 0;
716 }
717
718done:
719 hci_req_unlock(hdev);
720 hci_dev_put(hdev);
721 return ret;
722}
723
724static int hci_dev_do_close(struct hci_dev *hdev)
725{
726 BT_DBG("%s %p", hdev->name, hdev);
727
Andre Guedes28b75a82012-02-03 17:48:00 -0300728 cancel_work_sync(&hdev->le_scan);
729
Linus Torvalds1da177e2005-04-16 15:20:36 -0700730 hci_req_cancel(hdev, ENODEV);
731 hci_req_lock(hdev);
732
733 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300734 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735 hci_req_unlock(hdev);
736 return 0;
737 }
738
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200739 /* Flush RX and TX works */
740 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400741 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200743 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200744 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200745 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +0200746 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200747 }
748
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200749 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200750 cancel_delayed_work(&hdev->service_cache);
751
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300752 cancel_delayed_work_sync(&hdev->le_scan_disable);
753
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300754 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 inquiry_cache_flush(hdev);
756 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300757 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700758
759 hci_notify(hdev, HCI_DEV_DOWN);
760
761 if (hdev->flush)
762 hdev->flush(hdev);
763
764 /* Reset device */
765 skb_queue_purge(&hdev->cmd_q);
766 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200767 if (!test_bit(HCI_RAW, &hdev->flags) &&
768 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700769 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200770 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200771 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700772 clear_bit(HCI_INIT, &hdev->flags);
773 }
774
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200775 /* flush cmd work */
776 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777
778 /* Drop queues */
779 skb_queue_purge(&hdev->rx_q);
780 skb_queue_purge(&hdev->cmd_q);
781 skb_queue_purge(&hdev->raw_q);
782
783 /* Drop last sent command */
784 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300785 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786 kfree_skb(hdev->sent_cmd);
787 hdev->sent_cmd = NULL;
788 }
789
790 /* After this point our queues are empty
791 * and no tasks are scheduled. */
792 hdev->close(hdev);
793
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100794 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
795 hci_dev_lock(hdev);
796 mgmt_powered(hdev, 0);
797 hci_dev_unlock(hdev);
798 }
Johan Hedberg5add6af2010-12-16 10:00:37 +0200799
Linus Torvalds1da177e2005-04-16 15:20:36 -0700800 /* Clear flags */
801 hdev->flags = 0;
802
Johan Hedberge59fda82012-02-22 18:11:53 +0200803 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +0200804 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +0200805
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806 hci_req_unlock(hdev);
807
808 hci_dev_put(hdev);
809 return 0;
810}
811
/* Power down an HCI device on behalf of an explicit close request.
 *
 * Any pending automatic power-off is cancelled first, since the device
 * is being closed right now and the deferred work must not run later
 * against an already-closed device.
 *
 * Returns 0 on success or a negative errno (-ENODEV if the device
 * index does not resolve).
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);	/* takes a reference on the device */
	if (!hdev)
		return -ENODEV;

	/* Leaving auto-off mode: cancel the delayed power_off work so it
	 * does not fire after we have closed the device ourselves */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);		/* balances hci_dev_get() above */
	return err;
}
829
/* Re-initialize a running HCI device (HCIDEVRESET ioctl).
 *
 * Drops pending RX/command queues and cached inquiry/connection state,
 * resets the per-type flow-control counters and, unless the device is
 * in raw mode, issues a synchronous HCI Reset to the controller.
 *
 * Returns 0 on success (including the device-not-up case) or a
 * negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up; still reports success */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush cached remote-device and connection state under the
	 * device lock */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Let the driver flush its own queues, if it provides a hook */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again and clear the ACL, SCO and
	 * LE packet credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
868
869int hci_dev_reset_stat(__u16 dev)
870{
871 struct hci_dev *hdev;
872 int ret = 0;
873
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200874 hdev = hci_dev_get(dev);
875 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700876 return -ENODEV;
877
878 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
879
880 hci_dev_put(hdev);
881
882 return ret;
883}
884
/* Handle the legacy HCISET* device-control ioctls.
 *
 * Copies the request from user space, resolves the target device and
 * either runs a synchronous HCI request (auth/encrypt/scan/link
 * policy) or directly updates locally cached settings.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the MASTER and ACCEPT bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: the second __u16 is
		 * the MTU, the first the packet count (layout depends on
		 * host endianness matching the historical ABI) */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed layout as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
959
/* HCIGETDEVLIST ioctl helper: report the id and flags of every
 * registered HCI device to user space.
 *
 * The user-supplied dev_num caps the number of returned entries and is
 * bounded so the temporary buffer stays within two pages.  Returns 0
 * on success or -EFAULT/-EINVAL/-ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* A legacy ioctl user is taking over power management:
		 * stop any pending automatic power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not driven through the mgmt interface default
		 * to being pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1006
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info snapshot for
 * one device and copy it back to user space.
 *
 * Returns 0 on success or -EFAULT/-ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Same side effects as hci_get_dev_list(): a legacy user takes
	 * over power management, and non-mgmt devices become pairable */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	/* NOTE(review): assumes hdev->name ("hci%d") always fits into
	 * di.name — confirm the two fields have matching sizes */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack transport bus in the low nibble, device type above it */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1048
1049/* ---- Interface to HCI drivers ---- */
1050
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001051static int hci_rfkill_set_block(void *data, bool blocked)
1052{
1053 struct hci_dev *hdev = data;
1054
1055 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1056
1057 if (!blocked)
1058 return 0;
1059
1060 hci_dev_do_close(hdev);
1061
1062 return 0;
1063}
1064
/* rfkill operations: only blocking triggers an action (device close);
 * see hci_rfkill_set_block() above */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1068
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069/* Alloc HCI device */
1070struct hci_dev *hci_alloc_dev(void)
1071{
1072 struct hci_dev *hdev;
1073
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001074 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 if (!hdev)
1076 return NULL;
1077
David Herrmann0ac7e702011-10-08 14:58:47 +02001078 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079 skb_queue_head_init(&hdev->driver_init);
1080
1081 return hdev;
1082}
1083EXPORT_SYMBOL(hci_alloc_dev);
1084
/* Free HCI device */
/* Release a device obtained from hci_alloc_dev().  The struct itself
 * is not freed here: dropping the embedded struct device reference
 * lets the device core free it through its release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1094
/* Deferred power-on handler (hdev->power_on work).
 *
 * Opens the device; if that fails nothing else happens.  While
 * HCI_AUTO_OFF is set a delayed power-off is armed so a device nobody
 * starts using is switched back off after AUTO_OFF_TIMEOUT.  Leaving
 * the setup phase is reported to mgmt as a newly added controller.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1111
/* Deferred power-off handler (hdev->power_off delayed work, armed by
 * hci_power_on()).  Simply closes the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1121
/* Delayed-work handler ending a time-limited discoverable period:
 * writes SCAN_PAGE back to the controller (dropping inquiry scan) and
 * clears the cached discoverable timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1139
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001140int hci_uuids_clear(struct hci_dev *hdev)
1141{
1142 struct list_head *p, *n;
1143
1144 list_for_each_safe(p, n, &hdev->uuids) {
1145 struct bt_uuid *uuid;
1146
1147 uuid = list_entry(p, struct bt_uuid, list);
1148
1149 list_del(p);
1150 kfree(uuid);
1151 }
1152
1153 return 0;
1154}
1155
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001156int hci_link_keys_clear(struct hci_dev *hdev)
1157{
1158 struct list_head *p, *n;
1159
1160 list_for_each_safe(p, n, &hdev->link_keys) {
1161 struct link_key *key;
1162
1163 key = list_entry(p, struct link_key, list);
1164
1165 list_del(p);
1166 kfree(key);
1167 }
1168
1169 return 0;
1170}
1171
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001172int hci_smp_ltks_clear(struct hci_dev *hdev)
1173{
1174 struct smp_ltk *k, *tmp;
1175
1176 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1177 list_del(&k->list);
1178 kfree(k);
1179 }
1180
1181 return 0;
1182}
1183
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001184struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1185{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001186 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001187
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001188 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001189 if (bacmp(bdaddr, &k->bdaddr) == 0)
1190 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001191
1192 return NULL;
1193}
1194
/* Decide whether a link key should be stored persistently (returns 1)
 * or kept only for the current session (returns 0).
 *
 * The decision depends on the new key's type, the type of the key it
 * replaces (old_key_type == 0xff meaning "none") and the bonding
 * requirements both sides expressed.  conn may be NULL (pairing
 * without a connection context).
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1230
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001231struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001232{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001233 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001234
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001238 continue;
1239
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001240 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001241 }
1242
1243 return NULL;
1244}
1245EXPORT_SYMBOL(hci_find_ltk);
1246
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001247struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1248 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001249{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001250 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001251
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001252 list_for_each_entry(k, &hdev->long_term_keys, list)
1253 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001255 return k;
1256
1257 return NULL;
1258}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001259EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001260
/* Insert or update the stored link key for bdaddr.
 *
 * When new_key is set, the mgmt interface is notified; if the key does
 * not qualify for persistent storage (see hci_persistent_key()) it is
 * removed again right after the notification so it only survives the
 * current session.  Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if one exists */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
				(!conn || conn->remote_auth == 0xff) &&
				old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the type of the key it
	 * replaced; anything else stores the new type as-is */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are dropped right after notifying mgmt */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1315
/* Insert or update an SMP long term key (or short term key) for the
 * given address/address-type pair.
 *
 * Types without the STK or LTK bit are silently ignored (returns 0).
 * Only new LTKs — not STKs — are reported to the mgmt interface.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1352
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001353int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1354{
1355 struct link_key *key;
1356
1357 key = hci_find_link_key(hdev, bdaddr);
1358 if (!key)
1359 return -ENOENT;
1360
1361 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1362
1363 list_del(&key->list);
1364 kfree(key);
1365
1366 return 0;
1367}
1368
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001369int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1370{
1371 struct smp_ltk *k, *tmp;
1372
1373 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1374 if (bacmp(bdaddr, &k->bdaddr))
1375 continue;
1376
1377 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1378
1379 list_del(&k->list);
1380 kfree(k);
1381 }
1382
1383 return 0;
1384}
1385
/* HCI command timer function */
/* Fires when the controller failed to answer an HCI command in time.
 * Restores one command credit and re-kicks cmd_work so command
 * processing does not stall forever on the lost completion. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1395
Szymon Janc2763eda2011-03-22 13:12:22 +01001396struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1397 bdaddr_t *bdaddr)
1398{
1399 struct oob_data *data;
1400
1401 list_for_each_entry(data, &hdev->remote_oob_data, list)
1402 if (bacmp(bdaddr, &data->bdaddr) == 0)
1403 return data;
1404
1405 return NULL;
1406}
1407
1408int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1409{
1410 struct oob_data *data;
1411
1412 data = hci_find_remote_oob_data(hdev, bdaddr);
1413 if (!data)
1414 return -ENOENT;
1415
1416 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1417
1418 list_del(&data->list);
1419 kfree(data);
1420
1421 return 0;
1422}
1423
1424int hci_remote_oob_data_clear(struct hci_dev *hdev)
1425{
1426 struct oob_data *data, *n;
1427
1428 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1429 list_del(&data->list);
1430 kfree(data);
1431 }
1432
1433 return 0;
1434}
1435
1436int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1437 u8 *randomizer)
1438{
1439 struct oob_data *data;
1440
1441 data = hci_find_remote_oob_data(hdev, bdaddr);
1442
1443 if (!data) {
1444 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1445 if (!data)
1446 return -ENOMEM;
1447
1448 bacpy(&data->bdaddr, bdaddr);
1449 list_add(&data->list, &hdev->remote_oob_data);
1450 }
1451
1452 memcpy(data->hash, hash, sizeof(data->hash));
1453 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1454
1455 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1456
1457 return 0;
1458}
1459
Antti Julkub2a66aa2011-06-15 12:01:14 +03001460struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1461 bdaddr_t *bdaddr)
1462{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001463 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001464
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001465 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001466 if (bacmp(bdaddr, &b->bdaddr) == 0)
1467 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001468
1469 return NULL;
1470}
1471
1472int hci_blacklist_clear(struct hci_dev *hdev)
1473{
1474 struct list_head *p, *n;
1475
1476 list_for_each_safe(p, n, &hdev->blacklist) {
1477 struct bdaddr_list *b;
1478
1479 b = list_entry(p, struct bdaddr_list, list);
1480
1481 list_del(p);
1482 kfree(b);
1483 }
1484
1485 return 0;
1486}
1487
/* Add bdaddr to the device's reject ("blacklist") list.
 *
 * BDADDR_ANY is rejected with -EBADF and duplicates with -EEXIST.  On
 * success the result of the mgmt_device_blocked() notification is
 * returned; -ENOMEM on allocation failure.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1508
/* Remove bdaddr from the reject list.
 *
 * Passing BDADDR_ANY clears the entire list instead.  Returns -ENOENT
 * when the address is not blacklisted, otherwise the result of the
 * mgmt_device_unblocked() notification.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1525
/* Delayed-work handler (hdev->adv_work) that empties the LE
 * advertising cache under the device lock. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1537
Andre Guedes76c86862011-05-26 16:23:50 -03001538int hci_adv_entries_clear(struct hci_dev *hdev)
1539{
1540 struct adv_entry *entry, *tmp;
1541
1542 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1543 list_del(&entry->list);
1544 kfree(entry);
1545 }
1546
1547 BT_DBG("%s adv cache cleared", hdev->name);
1548
1549 return 0;
1550}
1551
1552struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1553{
1554 struct adv_entry *entry;
1555
1556 list_for_each_entry(entry, &hdev->adv_entries, list)
1557 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1558 return entry;
1559
1560 return NULL;
1561}
1562
1563static inline int is_connectable_adv(u8 evt_type)
1564{
1565 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1566 return 1;
1567
1568 return 0;
1569}
1570
/* Cache an LE advertising report's address and address type.
 *
 * Only connectable events are cached, and addresses already present
 * are not added twice.  Returns 0 on success (including the
 * already-cached case), -EINVAL for non-connectable events and
 * -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1598
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001599static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1600{
1601 struct le_scan_params *param = (struct le_scan_params *) opt;
1602 struct hci_cp_le_set_scan_param cp;
1603
1604 memset(&cp, 0, sizeof(cp));
1605 cp.type = param->type;
1606 cp.interval = cpu_to_le16(param->interval);
1607 cp.window = cpu_to_le16(param->window);
1608
1609 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1610}
1611
/* Request callback: send LE Set Scan Enable with enable=1; all other
 * fields (e.g. duplicate filtering) stay zero from the memset. */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1621
/* Synchronously start an LE scan: first set the scan parameters, then
 * enable scanning, each as a blocking HCI request with a 3 s timeout.
 *
 * Fails with -EINPROGRESS if a scan is already active.  On success a
 * delayed work item is armed to stop the scan again after 'timeout'
 * ms.  Runs from process context (the le_scan work) and may sleep.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-request timeout */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be programmed before scanning is enabled */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Auto-disable the scan once the requested duration has elapsed */
	schedule_delayed_work(&hdev->le_scan_disable,
						msecs_to_jiffies(timeout));

	return 0;
}
1655
/* Delayed-work handler that stops a running LE scan.  The command
 * block is left all-zero, so cp.enable == 0, i.e. "scanning disabled". */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1668
/* Work handler behind hci_le_scan(): performs the actual (sleeping)
 * scan start using the parameters stashed in hdev->le_scan_params. */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval,
					param->window, param->timeout);
}
1679
/* Kick off an LE scan asynchronously.
 *
 * The parameters are stored in hdev->le_scan_params and the sleeping
 * part is deferred to the le_scan work on system_long_wq.  Returns
 * -EINPROGRESS when scan work is already queued or running, 0
 * otherwise.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
				int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1699
/* Register HCI device.
 *
 * Assigns the first free device id (BR/EDR controllers may take id 0;
 * AMP controllers start at 1 so the index doubles as the AMP controller
 * ID), initializes all per-device state, work items and lists, creates
 * the per-device workqueue and sysfs/rfkill entries, and schedules the
 * initial power-on.
 *
 * Returns the assigned id on success or a negative errno; on failure the
 * device is removed from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must at least provide open/close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id.
	 * This scan assumes hci_dev_list is sorted by ascending id with no
	 * duplicates: it walks consecutive ids from the start and stops at
	 * the first gap. 'head' trails the last node whose id matched.
	 *
	 * NOTE(review): list_add_tail(new, head) inserts *before* 'head';
	 * with 'head' pointing at the last consecutive-id node this looks
	 * like it can insert the new device ahead of it and break the
	 * sorted invariant the scan relies on — verify against the
	 * list_add()/list_add_tail() semantics in <linux/list.h>.
	 */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default controller state and packet types */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	/* Sniff-mode defaults (intervals in baseband slots) */
	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands that never get a completion event */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Per-device single-threaded queue for rx/tx/cmd work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Mark as auto-off/setup until mgmt userspace takes over */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done under the lock above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1831
/* Unregister HCI device.
 *
 * Reverse of hci_register_dev(): removes the device from the global
 * list, closes it, notifies mgmt (unless still in INIT/SETUP), tears
 * down rfkill/sysfs/workqueue, clears all per-device caches under the
 * device lock and drops the registration reference taken at register
 * time (which may free hdev).
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference held since hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1884
/* Suspend HCI device: broadcast the suspend event to registered HCI
 * notifier consumers. Always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1892
/* Resume HCI device: broadcast the resume event to registered HCI
 * notifier consumers. Always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1900
/* Receive frame from HCI drivers.
 *
 * The driver has stored its hdev in skb->dev. Frames are accepted only
 * while the device is up or being initialized; otherwise the skb is
 * freed and -ENXIO returned. Accepted frames are timestamped, queued on
 * rx_q and processed asynchronously by hci_rx_work on the per-device
 * workqueue. May be called from atomic context.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1923
/* Incrementally reassemble one HCI packet from a driver byte stream.
 *
 * @type:  HCI packet type (ACL, SCO or EVENT; range-checked below)
 * @data:  raw bytes from the driver
 * @count: number of bytes available at @data
 * @index: reassembly slot in hdev->reassembly[] (one per packet type
 *         for hci_recv_fragment, slot 0 for the stream variant)
 *
 * State machine: a fresh slot allocates an skb sized for the largest
 * packet of @type and first expects just the header; once the header is
 * complete, 'expect' is reloaded with the payload length from it. When
 * 'expect' reaches zero the finished packet is handed to
 * hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno (-EILSEQ on bad type/index, -ENOMEM on allocation failure or a
 * header announcing more payload than the skb can hold).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the skb for the worst case
		 * of this type and expect the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current stage still expects */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If the header just completed, learn the payload length
		 * from it and sanity-check it against the skb tailroom. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2032
Marcel Holtmannef222012007-07-11 06:42:04 +02002033int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2034{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302035 int rem = 0;
2036
Marcel Holtmannef222012007-07-11 06:42:04 +02002037 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2038 return -EILSEQ;
2039
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002040 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002041 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302042 if (rem < 0)
2043 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002044
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302045 data += (count - rem);
2046 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002047 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002048
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302049 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002050}
2051EXPORT_SYMBOL(hci_recv_fragment);
2052
/* Single reassembly slot used for self-describing byte streams (H:4
 * style transports where each frame starts with a packet-type byte). */
#define STREAM_REASSEMBLY 0

/* Feed a raw H:4-style byte stream into the reassembler.
 *
 * When no packet is in progress, the first byte of the buffer is read
 * as the HCI packet type indicator and skipped; otherwise the type of
 * the in-progress packet is reused. Returns unconsumed byte count
 * (>= 0) or a negative errno from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: first byte is the type */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2087
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure on the global hci_cb
 * list (protected by hci_cb_list_lock). Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2101
/* Remove a previously registered upper-protocol callback structure from
 * the global hci_cb list. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2113
/* Hand one fully formed HCI frame to the driver.
 *
 * The target hdev is carried in skb->dev. The frame is timestamped, a
 * copy goes to the monitor channel and (in promiscuous mode) to raw HCI
 * sockets, then ownership is dropped and the driver's send callback is
 * invoked. Consumes the skb; returns the driver's result, or -ENODEV if
 * no hdev was attached.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2141
/* Send HCI command.
 *
 * Builds a command skb (header with little-endian opcode + optional
 * @plen bytes of @param), queues it on cmd_q and wakes the command work
 * item, which serializes commands towards the controller. During init
 * the opcode is additionally recorded in init_last_cmd.
 *
 * Returns 0 on success or -ENOMEM if the skb allocation failed.
 * @param must point to at least @plen readable bytes when @plen != 0.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued while initializing */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177
2178/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002179void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180{
2181 struct hci_command_hdr *hdr;
2182
2183 if (!hdev->sent_cmd)
2184 return NULL;
2185
2186 hdr = (void *) hdev->sent_cmd->data;
2187
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002188 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 return NULL;
2190
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002191 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
2193 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2194}
2195
/* Send ACL data */
/* Prepend an ACL header to @skb: the 16-bit handle/flags word (packed
 * via hci_handle_pack, little-endian) followed by the little-endian
 * payload length, which is the skb length before the push. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2208
/* Queue an ACL packet (possibly a fragment chain) on @queue.
 *
 * A non-fragmented skb (the caller already added its ACL header) is
 * simply appended. A fragmented skb has its frag_list detached and each
 * continuation fragment gets its own ACL header with ACL_START cleared
 * and ACL_CONT set; all fragments are appended under the queue lock so
 * they stay contiguous in the queue.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2249
/* Send ACL data on a channel: add the ACL header for the first
 * fragment, queue the packet (and any fragments) on the channel's
 * data_q, then kick the TX work item to schedule transmission. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2266
/* Send SCO data */
/* Prepend a SCO header (little-endian handle, byte-sized length) to
 * @skb, queue it on the connection's data_q and kick the TX work. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2289
2290/* ---- HCI TX task (outgoing data) ---- */
2291
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * in-flight packets (fair round-robin), and compute its send quota.
 *
 * The quota is the relevant controller buffer count divided by the
 * number of eligible connections, but at least 1 so a connection is
 * never starved. *quote is set to 0 when nothing is eligible.
 * Traversal of the connection hash is RCU-protected.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy connection seen so far */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Select the buffer pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_mtu == 0) share the ACL pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2351
/* Link TX timeout handler: the controller stopped acknowledging sent
 * packets, so disconnect every connection of @type that still has
 * packets in flight (reason 0x13: remote user terminated connection).
 * Connection hash traversal is RCU-protected. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2372
/* Channel-level scheduler: pick the next hci_chan of link @type to
 * service and compute its send quota.
 *
 * Among all channels with queued data, only those whose head skb has
 * the highest priority compete; of those, the channel whose connection
 * has the fewest packets in flight wins. The quota is the matching
 * controller buffer count divided by the number of competing channels,
 * minimum 1. Returns NULL when no channel has data. RCU-protected.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once all connections of this type were visited */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Quota comes from the buffer pool of the winning link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2451
/* Anti-starvation pass run after a TX round for link @type.
 *
 * For every channel of @type: a channel that transmitted this round
 * gets its per-round 'sent' counter reset; a channel that queued data
 * but sent nothing has its head skb's priority promoted to
 * HCI_PRIO_MAX - 1 so it can compete in the next round. RCU-protected.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: just reset */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: promote its head packet */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2501
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002502static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2503{
2504 /* Calculate count of blocks used by this packet */
2505 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2506}
2507
/* Detect a stalled ACL link: if no controller buffers are free (@cnt ==
 * 0) and nothing has been transmitted for HCI_ACL_TX_TIMEOUT, tear down
 * stalled connections via hci_link_tx_to(). Skipped for raw devices. */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518
/* Packet-based ACL scheduler: while controller ACL buffers are free,
 * repeatedly pick the best channel via hci_chan_sent() and send up to
 * its quota of packets, stopping early within a channel if a lower-
 * priority packet reaches the head of its queue. Afterwards, if
 * anything was sent, run the starvation/priority recalculation pass. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed; account it against
			 * the channel and its connection */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2556
/* Block-based ACL scheduler: like hci_sched_acl_pkt() but credits are
 * counted in controller data blocks (hdev->block_cnt); one frame may
 * consume several blocks as computed by __get_blocks().
 */
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	/* Snapshot of block credits to detect progress at the end */
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Flag a TX timeout if no block credit has come back in time */
	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Only frames at least this priority are drained this round */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Frames larger than the remaining credit cannot be
			 * sent; give up for this pass.
			 * NOTE(review): skb was already dequeued here, so
			 * returning appears to drop it without freeing or
			 * requeueing, and skips hci_prio_recalculate() —
			 * verify against upstream intent. */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			/* Bring the link out of sniff/park before sending */
			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Charge the frame's block count against both the
			 * controller credit and this channel's quota. */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* If any blocks were consumed, rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2602
2603static inline void hci_sched_acl(struct hci_dev *hdev)
2604{
2605 BT_DBG("%s", hdev->name);
2606
2607 if (!hci_conn_num(hdev, ACL_LINK))
2608 return;
2609
2610 switch (hdev->flow_ctl_mode) {
2611 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2612 hci_sched_acl_pkt(hdev);
2613 break;
2614
2615 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2616 hci_sched_acl_blk(hdev);
2617 break;
2618 }
2619}
2620
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621/* Schedule SCO */
2622static inline void hci_sched_sco(struct hci_dev *hdev)
2623{
2624 struct hci_conn *conn;
2625 struct sk_buff *skb;
2626 int quote;
2627
2628 BT_DBG("%s", hdev->name);
2629
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002630 if (!hci_conn_num(hdev, SCO_LINK))
2631 return;
2632
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2634 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2635 BT_DBG("skb %p len %d", skb, skb->len);
2636 hci_send_frame(skb);
2637
2638 conn->sent++;
2639 if (conn->sent == ~0)
2640 conn->sent = 0;
2641 }
2642 }
2643}
2644
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002645static inline void hci_sched_esco(struct hci_dev *hdev)
2646{
2647 struct hci_conn *conn;
2648 struct sk_buff *skb;
2649 int quote;
2650
2651 BT_DBG("%s", hdev->name);
2652
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002653 if (!hci_conn_num(hdev, ESCO_LINK))
2654 return;
2655
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002656 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2657 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2658 BT_DBG("skb %p len %d", skb, skb->len);
2659 hci_send_frame(skb);
2660
2661 conn->sent++;
2662 if (conn->sent == ~0)
2663 conn->sent = 0;
2664 }
2665 }
2666}
2667
/* LE scheduler: drains LE channel queues by priority.  Controllers
 * without a dedicated LE buffer pool (le_pkts == 0) borrow credits
 * from the ACL pool instead.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE credit pool if the controller has one, otherwise
	 * fall back to shared ACL credits. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* snapshot, used below to detect progress */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Only frames at least this priority are drained this round */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Now actually remove the peeked frame */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool they
	 * were borrowed from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If any frame went out, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2718
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002719static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002721 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 struct sk_buff *skb;
2723
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002724 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2725 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726
2727 /* Schedule queues and send stuff to HCI driver */
2728
2729 hci_sched_acl(hdev);
2730
2731 hci_sched_sco(hdev);
2732
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002733 hci_sched_esco(hdev);
2734
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002735 hci_sched_le(hdev);
2736
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 /* Send next queued raw (unknown type) packet */
2738 while ((skb = skb_dequeue(&hdev->raw_q)))
2739 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740}
2741
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002742/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743
2744/* ACL data packet */
2745static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2746{
2747 struct hci_acl_hdr *hdr = (void *) skb->data;
2748 struct hci_conn *conn;
2749 __u16 handle, flags;
2750
2751 skb_pull(skb, HCI_ACL_HDR_SIZE);
2752
2753 handle = __le16_to_cpu(hdr->handle);
2754 flags = hci_flags(handle);
2755 handle = hci_handle(handle);
2756
2757 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2758
2759 hdev->stat.acl_rx++;
2760
2761 hci_dev_lock(hdev);
2762 conn = hci_conn_hash_lookup_handle(hdev, handle);
2763 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002764
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002766 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002767
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002769 l2cap_recv_acldata(conn, skb, flags);
2770 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002772 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 hdev->name, handle);
2774 }
2775
2776 kfree_skb(skb);
2777}
2778
2779/* SCO data packet */
2780static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2781{
2782 struct hci_sco_hdr *hdr = (void *) skb->data;
2783 struct hci_conn *conn;
2784 __u16 handle;
2785
2786 skb_pull(skb, HCI_SCO_HDR_SIZE);
2787
2788 handle = __le16_to_cpu(hdr->handle);
2789
2790 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2791
2792 hdev->stat.sco_rx++;
2793
2794 hci_dev_lock(hdev);
2795 conn = hci_conn_hash_lookup_handle(hdev, handle);
2796 hci_dev_unlock(hdev);
2797
2798 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002800 sco_recv_scodata(conn, skb);
2801 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002803 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 hdev->name, handle);
2805 }
2806
2807 kfree_skb(skb);
2808}
2809
/* RX work item: drains the device receive queue, mirrors each frame
 * to the monitor/raw sockets, then dispatches it by packet type to
 * the event, ACL or SCO handlers.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode userspace handles everything; drop here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events are still needed to complete init. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame; each handler consumes the skb */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2864
/* CMD work item: sends the next queued HCI command when the
 * controller has a free command slot (cmd_cnt), keeping a clone in
 * hdev->sent_cmd for completion matching, and (re)arms the command
 * timeout timer.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the event handler can match replies */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset the timeout is suppressed; otherwise
			 * arm/extend the command response timer. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (out of memory): put the command back
			 * and retry from the workqueue. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002895
2896int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2897{
2898 /* General inquiry access code (GIAC) */
2899 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2900 struct hci_cp_inquiry cp;
2901
2902 BT_DBG("%s", hdev->name);
2903
2904 if (test_bit(HCI_INQUIRY, &hdev->flags))
2905 return -EINPROGRESS;
2906
Johan Hedberg46632622012-01-02 16:06:08 +02002907 inquiry_cache_flush(hdev);
2908
Andre Guedes2519a1f2011-11-07 11:45:24 -03002909 memset(&cp, 0, sizeof(cp));
2910 memcpy(&cp.lap, lap, sizeof(cp.lap));
2911 cp.length = length;
2912
2913 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2914}
Andre Guedes023d50492011-11-04 14:16:52 -03002915
2916int hci_cancel_inquiry(struct hci_dev *hdev)
2917{
2918 BT_DBG("%s", hdev->name);
2919
2920 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2921 return -EPERM;
2922
2923 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2924}