blob: bee425ad25b537ee34897640d532a47844f492cc [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
Johan Hedbergab81cbf2010-12-15 13:53:18 +020054#define AUTO_OFF_TIMEOUT 2000
55
Marcel Holtmannb78752c2010-08-08 23:06:53 -040056static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020057static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020058static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
Linus Torvalds1da177e2005-04-16 15:20:36 -070060/* HCI device list */
61LIST_HEAD(hci_dev_list);
62DEFINE_RWLOCK(hci_dev_list_lock);
63
64/* HCI callback list */
65LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock);
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Propagate a device state event (e.g. HCI_DEV_UP / HCI_DEV_DOWN) to
 * interested listeners; currently this only notifies the HCI socket layer.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
/* Called when a command completes. During the HCI_INIT phase this filters
 * out completions that do not match the last init command (working around
 * controllers that emit spontaneous events); otherwise it finishes the
 * synchronous request a waiter started via __hci_request().
 *
 * @cmd:    opcode of the command that completed
 * @result: HCI status of the completion
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only the spurious-reset case falls through: a reset
		 * completion while the last command sent was NOT a reset.
		 */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a copy of the stalled command; on clone failure
		 * we silently drop it and rely on the init timeout.
		 */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Wake the thread blocked in __hci_request(), handing it the result. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
114
/* Abort a pending synchronous request: record @err (a positive errno,
 * negated later by __hci_request()) and wake the waiting thread.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
125
126/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which is expected to queue one or more HCI commands) and
 * sleeps interruptibly until hci_req_complete()/hci_req_cancel() wakes
 * us or @timeout (in jiffies) expires. Caller must hold the request
 * lock (hci_req_lock) to serialize requests.
 *
 * Returns 0 on success, a negative errno on failure/cancel, -ETIMEDOUT
 * on timeout, or -EINTR if interrupted by a signal.
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue *before* issuing the request so a
	 * fast completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on the signal path req_status is left as-is and is
	 * only cleared by the next request — confirm callers tolerate this.
	 */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to an errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
169
Gustavo Padovan6039aa72012-05-23 04:04:18 -0300170static int hci_request(struct hci_dev *hdev,
171 void (*req)(struct hci_dev *hdev, unsigned long opt),
172 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173{
174 int ret;
175
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200176 if (!test_bit(HCI_UP, &hdev->flags))
177 return -ENETDOWN;
178
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179 /* Serialize all requests */
180 hci_req_lock(hdev);
181 ret = __hci_request(hdev, req, opt, timeout);
182 hci_req_unlock(hdev);
183
184 return ret;
185}
186
/* Request callback: issue an HCI reset. HCI_RESET is set so the event
 * path knows a reset is in flight; @opt is unused.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
195
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200196static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200198 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800199 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200200 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200202 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
203
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 /* Mandatory initialization */
205
206 /* Reset */
Szymon Janca6c511c2012-05-23 12:35:46 +0200207 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200208 set_bit(HCI_RESET, &hdev->flags);
209 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300210 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211
212 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200215 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200216 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200217
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200219 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200222 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
223
224 /* Read Class of Device */
225 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
226
227 /* Read Local Name */
228 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229
230 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200231 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232
233 /* Optional initialization */
234
235 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200236 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200237 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700240 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200241 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200242
243 bacpy(&cp.bdaddr, BDADDR_ANY);
244 cp.delete_all = 1;
245 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700246}
247
/* Initialization command sequence for AMP (alternate MAC/PHY)
 * controllers, which use block-based flow control and a reduced
 * set of identity reads compared to BR/EDR.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
261
/* Request callback run during device bring-up: first flush any
 * driver-provided "special" init commands into the command queue,
 * then run the type-specific init sequence. @opt is unused.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	/* The loop drained the queue; purge is a defensive no-op here. */
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
295
/* Request callback for LE-specific init; @opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
303
/* Request callback: write the scan-enable setting; @opt carries the
 * scan mode bits (inquiry/page scan) truncated to a __u8.
 */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
313
/* Request callback: write the authentication-enable setting carried
 * in @opt (truncated to a __u8).
 */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
323
/* Request callback: write the encryption-mode setting carried in
 * @opt (truncated to a __u8).
 */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
333
/* Request callback: write the default link policy; @opt carries the
 * policy bits, converted to little-endian for the wire.
 */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
343
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900344/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 * Device is held on return. */
346struct hci_dev *hci_dev_get(int index)
347{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200348 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700349
350 BT_DBG("%d", index);
351
352 if (index < 0)
353 return NULL;
354
355 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200356 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357 if (d->id == index) {
358 hdev = hci_dev_hold(d);
359 break;
360 }
361 }
362 read_unlock(&hci_dev_list_lock);
363 return hdev;
364}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365
366/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200367
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200368bool hci_discovery_active(struct hci_dev *hdev)
369{
370 struct discovery_state *discov = &hdev->discovery;
371
Andre Guedes6fbe1952012-02-03 17:47:58 -0300372 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300373 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300374 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200375 return true;
376
Andre Guedes6fbe1952012-02-03 17:47:58 -0300377 default:
378 return false;
379 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200380}
381
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events. No-op if the state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Coming from STARTING means discovery never really began,
		 * so userspace was never told it was on — don't signal off.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
407
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408static void inquiry_cache_flush(struct hci_dev *hdev)
409{
Johan Hedberg30883512012-01-04 14:16:21 +0200410 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200411 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412
Johan Hedberg561aafb2012-01-04 13:31:59 +0200413 list_for_each_entry_safe(p, n, &cache->all, all) {
414 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200415 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200417
418 INIT_LIST_HEAD(&cache->unknown);
419 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420}
421
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300422struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
423 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424{
Johan Hedberg30883512012-01-04 14:16:21 +0200425 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 struct inquiry_entry *e;
427
428 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
429
Johan Hedberg561aafb2012-01-04 13:31:59 +0200430 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200432 return e;
433 }
434
435 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700436}
437
Johan Hedberg561aafb2012-01-04 13:31:59 +0200438struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300439 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200440{
Johan Hedberg30883512012-01-04 14:16:21 +0200441 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200442 struct inquiry_entry *e;
443
444 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
445
446 list_for_each_entry(e, &cache->unknown, list) {
447 if (!bacmp(&e->data.bdaddr, bdaddr))
448 return e;
449 }
450
451 return NULL;
452}
453
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200454struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300455 bdaddr_t *bdaddr,
456 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200457{
458 struct discovery_state *cache = &hdev->discovery;
459 struct inquiry_entry *e;
460
461 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
462
463 list_for_each_entry(e, &cache->resolve, list) {
464 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
465 return e;
466 if (!bacmp(&e->data.bdaddr, bdaddr))
467 return e;
468 }
469
470 return NULL;
471}
472
/* Re-insert @ie into the resolve list at its correct position: entries
 * are kept ordered so stronger signals (smaller |rssi|) get their names
 * resolved first, but an entry already NAME_PENDING is never displaced.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until we hit a non-pending entry with weaker-or-equal
	 * signal; @pos trails the walk so list_add() inserts before it.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
491
/* Insert or refresh an inquiry-cache entry for @data->bdaddr.
 *
 * @name_known: caller already knows the remote name (no resolve needed)
 * @ssp:        out-parameter, set when the device supports SSP (may be NULL)
 *
 * Returns true if the entry's name is (now) known or pending resolution,
 * false if the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* A previous result may have reported SSP support even if
		 * the current one does not.
		 */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed on an entry queued for name resolution:
		 * record it and re-sort the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: take the entry off whichever sub-list
	 * (unknown/resolve) it was on. A PENDING entry stays until its
	 * in-flight name request finishes.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
547
548static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
549{
Johan Hedberg30883512012-01-04 14:16:21 +0200550 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 struct inquiry_info *info = (struct inquiry_info *) buf;
552 struct inquiry_entry *e;
553 int copied = 0;
554
Johan Hedberg561aafb2012-01-04 13:31:59 +0200555 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200557
558 if (copied >= num)
559 break;
560
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561 bacpy(&info->bdaddr, &data->bdaddr);
562 info->pscan_rep_mode = data->pscan_rep_mode;
563 info->pscan_period_mode = data->pscan_period_mode;
564 info->pscan_mode = data->pscan_mode;
565 memcpy(info->dev_class, data->dev_class, 3);
566 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200567
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200569 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570 }
571
572 BT_DBG("cache %p, copied %d", cache, copied);
573 return copied;
574}
575
/* Request callback: start an inquiry using the parameters packed into
 * @opt (a pointer to struct hci_inquiry_req). Skipped if an inquiry
 * is already running.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
592
/* HCIINQUIRY ioctl helper: optionally run a fresh inquiry (when the
 * cache is stale/empty or a flush was requested), then copy the cached
 * results back to userspace after the updated request header.
 *
 * @arg: userspace pointer to struct hci_inquiry_req followed by a
 *       buffer for ir.num_rsp inquiry_info records.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28s per the HCI spec; 2000ms per unit
	 * gives the hardware comfortable headroom before we time out.
	 */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with the real num_rsp) first, then the
	 * result records immediately after it.
	 */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
659
660/* ---- HCI ioctl helpers ---- */
661
/* Bring up the HCI device with index @dev: open the transport, run the
 * HCI init sequence (unless the device is "raw"), and on success mark
 * the device up and notify listeners/mgmt. On init failure everything
 * is torn back down.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init-request error).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being torn down concurrently. */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Driver transport open. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip the HCI init command sequence entirely. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt learns about the power-on later. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup: stop the works, drain queues,
		 * flush the driver, drop the in-flight command and close.
		 */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
753
/* Bring an HCI device fully down: stop scheduled work, flush the data
 * queues, optionally reset the controller, close the driver and clear
 * all soft state.  Serialized against other requests via
 * hci_req_lock().  Returns 0, also when the device was already down. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* Make sure a queued/running LE scan work item has finished */
	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: only make sure the command timer is off */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A pending discoverable timeout is pointless now; cancel it and
	 * drop the discoverable state */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* Flush volatile inquiry/connection state under the device lock */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* Some controllers need an explicit HCI Reset before close
	 * (HCI_QUIRK_RESET_ON_CLOSE); skipped in raw mode */
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Only report "powered off" to mgmt when this was not an
	 * automatic power-down (HCI_AUTO_OFF) */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Wipe EIR data and device class so a re-open starts clean */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Release the device reference held while it was up
	 * NOTE(review): matching hci_dev_hold() is outside this view */
	hci_dev_put(hdev);
	return 0;
}
841
842int hci_dev_close(__u16 dev)
843{
844 struct hci_dev *hdev;
845 int err;
846
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200847 hdev = hci_dev_get(dev);
848 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100850
851 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
852 cancel_delayed_work(&hdev->power_off);
853
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100855
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856 hci_dev_put(hdev);
857 return err;
858}
859
/* HCIDEVRESET ioctl helper: purge the data queues, flush inquiry and
 * connection state and (unless in raw mode) issue an HCI Reset to the
 * controller.  A device that is not up is left untouched (returns 0). */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush volatile state under the device lock */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the command credit and clear all flow-control counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
898
899int hci_dev_reset_stat(__u16 dev)
900{
901 struct hci_dev *hdev;
902 int ret = 0;
903
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200904 hdev = hci_dev_get(dev);
905 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700906 return -ENODEV;
907
908 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
909
910 hci_dev_put(hdev);
911
912 return ret;
913}
914
/* Handler for the per-device configuration ioctls (HCISETAUTH etc.).
 * @arg points at a struct hci_dev_req carrying the target dev_id and a
 * command-specific dev_opt value.  Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: word 1 = MTU,
		 * word 0 = packet count */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
989
/* HCIGETDEVLIST ioctl helper: copy the list of registered devices
 * (id + flags per device) to userspace.  @arg points at a struct
 * hci_dev_list_req whose dev_num bounds the number of entries. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* A legacy ioctl user takes over: abort auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Without the mgmt interface, default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1036
/* HCIGETDEVINFO ioctl helper: copy a snapshot of one device's
 * settings and statistics to userspace.  @arg points at a struct
 * hci_dev_info whose dev_id selects the device. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A legacy ioctl user takes over: abort a pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Without the mgmt interface, default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1078
1079/* ---- Interface to HCI drivers ---- */
1080
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001081static int hci_rfkill_set_block(void *data, bool blocked)
1082{
1083 struct hci_dev *hdev = data;
1084
1085 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1086
1087 if (!blocked)
1088 return 0;
1089
1090 hci_dev_do_close(hdev);
1091
1092 return 0;
1093}
1094
/* rfkill integration: only the block operation is implemented;
 * hci_rfkill_set_block() ignores the unblock case. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1098
/* Deferred power-on handler (hdev->power_on work).  Opens the device;
 * when the stack powered it up automatically (HCI_AUTO_OFF) it also
 * schedules the automatic power-down.  Reports the new index to mgmt
 * once initial setup (HCI_SETUP) is complete. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	/* If the device cannot be opened there is nothing more to do */
	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1115
1116static void hci_power_off(struct work_struct *work)
1117{
Johan Hedberg32435532011-11-07 22:16:04 +02001118 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001119 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001120
1121 BT_DBG("%s", hdev->name);
1122
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001123 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001124}
1125
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001126static void hci_discov_off(struct work_struct *work)
1127{
1128 struct hci_dev *hdev;
1129 u8 scan = SCAN_PAGE;
1130
1131 hdev = container_of(work, struct hci_dev, discov_off.work);
1132
1133 BT_DBG("%s", hdev->name);
1134
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001135 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001136
1137 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1138
1139 hdev->discov_timeout = 0;
1140
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001141 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001142}
1143
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001144int hci_uuids_clear(struct hci_dev *hdev)
1145{
1146 struct list_head *p, *n;
1147
1148 list_for_each_safe(p, n, &hdev->uuids) {
1149 struct bt_uuid *uuid;
1150
1151 uuid = list_entry(p, struct bt_uuid, list);
1152
1153 list_del(p);
1154 kfree(uuid);
1155 }
1156
1157 return 0;
1158}
1159
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001160int hci_link_keys_clear(struct hci_dev *hdev)
1161{
1162 struct list_head *p, *n;
1163
1164 list_for_each_safe(p, n, &hdev->link_keys) {
1165 struct link_key *key;
1166
1167 key = list_entry(p, struct link_key, list);
1168
1169 list_del(p);
1170 kfree(key);
1171 }
1172
1173 return 0;
1174}
1175
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001176int hci_smp_ltks_clear(struct hci_dev *hdev)
1177{
1178 struct smp_ltk *k, *tmp;
1179
1180 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1181 list_del(&k->list);
1182 kfree(k);
1183 }
1184
1185 return 0;
1186}
1187
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001188struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1189{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001190 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001191
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001192 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001193 if (bacmp(bdaddr, &k->bdaddr) == 0)
1194 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001195
1196 return NULL;
1197}
1198
/* Decide whether a newly created link key should be stored permanently
 * (true) or discarded when the connection ends (false).  The decision
 * depends on the key type and on the local/remote authentication
 * requirements of @conn, which may be NULL (security mode 3).  The
 * checks are ordered: type-based rules first, then bonding rules. */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1234
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001236{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001237 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001238
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001239 list_for_each_entry(k, &hdev->long_term_keys, list) {
1240 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001241 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001242 continue;
1243
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001244 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001245 }
1246
1247 return NULL;
1248}
1249EXPORT_SYMBOL(hci_find_ltk);
1250
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001251struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001252 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001253{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001254 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001255
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001256 list_for_each_entry(k, &hdev->long_term_keys, list)
1257 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001258 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001259 return k;
1260
1261 return NULL;
1262}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001263EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001264
/* Store (or update) the link key for @bdaddr.  @conn may be NULL
 * (security mode 3).  When @new_key is set, the key's persistence is
 * evaluated and mgmt is notified.  Returns 0 or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Remember on the connection whether the key must be discarded
	 * later (i.e. it is not persistent) */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1317
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type.
 * Key types other than HCI_SMP_STK/HCI_SMP_LTK are silently ignored
 * (return 0).  mgmt is notified only for new proper LTKs.
 * Returns 0 or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address/type if present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are not reported to userspace, only proper LTKs */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1354
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001355int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1356{
1357 struct link_key *key;
1358
1359 key = hci_find_link_key(hdev, bdaddr);
1360 if (!key)
1361 return -ENOENT;
1362
1363 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1364
1365 list_del(&key->list);
1366 kfree(key);
1367
1368 return 0;
1369}
1370
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001371int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1372{
1373 struct smp_ltk *k, *tmp;
1374
1375 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1376 if (bacmp(bdaddr, &k->bdaddr))
1377 continue;
1378
1379 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1380
1381 list_del(&k->list);
1382 kfree(k);
1383 }
1384
1385 return 0;
1386}
1387
Ville Tervo6bd32322011-02-16 16:32:41 +02001388/* HCI command timer function */
1389static void hci_cmd_timer(unsigned long arg)
1390{
1391 struct hci_dev *hdev = (void *) arg;
1392
1393 BT_ERR("%s command tx timeout", hdev->name);
1394 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001395 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001396}
1397
Szymon Janc2763eda2011-03-22 13:12:22 +01001398struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001399 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001400{
1401 struct oob_data *data;
1402
1403 list_for_each_entry(data, &hdev->remote_oob_data, list)
1404 if (bacmp(bdaddr, &data->bdaddr) == 0)
1405 return data;
1406
1407 return NULL;
1408}
1409
1410int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1411{
1412 struct oob_data *data;
1413
1414 data = hci_find_remote_oob_data(hdev, bdaddr);
1415 if (!data)
1416 return -ENOENT;
1417
1418 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1419
1420 list_del(&data->list);
1421 kfree(data);
1422
1423 return 0;
1424}
1425
1426int hci_remote_oob_data_clear(struct hci_dev *hdev)
1427{
1428 struct oob_data *data, *n;
1429
1430 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1431 list_del(&data->list);
1432 kfree(data);
1433 }
1434
1435 return 0;
1436}
1437
1438int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001439 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001440{
1441 struct oob_data *data;
1442
1443 data = hci_find_remote_oob_data(hdev, bdaddr);
1444
1445 if (!data) {
1446 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1447 if (!data)
1448 return -ENOMEM;
1449
1450 bacpy(&data->bdaddr, bdaddr);
1451 list_add(&data->list, &hdev->remote_oob_data);
1452 }
1453
1454 memcpy(data->hash, hash, sizeof(data->hash));
1455 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1456
1457 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1458
1459 return 0;
1460}
1461
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001462struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001463{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001464 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001465
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001466 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001467 if (bacmp(bdaddr, &b->bdaddr) == 0)
1468 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001469
1470 return NULL;
1471}
1472
1473int hci_blacklist_clear(struct hci_dev *hdev)
1474{
1475 struct list_head *p, *n;
1476
1477 list_for_each_safe(p, n, &hdev->blacklist) {
1478 struct bdaddr_list *b;
1479
1480 b = list_entry(p, struct bdaddr_list, list);
1481
1482 list_del(p);
1483 kfree(b);
1484 }
1485
1486 return 0;
1487}
1488
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001489int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001490{
1491 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001492
1493 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1494 return -EBADF;
1495
Antti Julku5e762442011-08-25 16:48:02 +03001496 if (hci_blacklist_lookup(hdev, bdaddr))
1497 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001498
1499 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001500 if (!entry)
1501 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001502
1503 bacpy(&entry->bdaddr, bdaddr);
1504
1505 list_add(&entry->list, &hdev->blacklist);
1506
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001507 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001508}
1509
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001510int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001511{
1512 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001513
Szymon Janc1ec918c2011-11-16 09:32:21 +01001514 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001515 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001516
1517 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001518 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001519 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001520
1521 list_del(&entry->list);
1522 kfree(entry);
1523
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001524 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001525}
1526
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001527static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1528{
1529 struct le_scan_params *param = (struct le_scan_params *) opt;
1530 struct hci_cp_le_set_scan_param cp;
1531
1532 memset(&cp, 0, sizeof(cp));
1533 cp.type = param->type;
1534 cp.interval = cpu_to_le16(param->interval);
1535 cp.window = cpu_to_le16(param->window);
1536
1537 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1538}
1539
/* Request callback: send LE Set Scan Enable with enable=1 (start
 * scanning); all other command fields stay zero from the memset. */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1549
/* Synchronously start an LE scan: program the parameters, enable
 * scanning, then arm le_scan_disable to stop the scan after @timeout
 * ms.  Returns -EINPROGRESS if a scan is already running, otherwise
 * the result of the failed request (or 0). */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-request timeout */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* param is stack-allocated: assumed consumed synchronously by
	 * __hci_request()/le_scan_param_req() — see le_scan_param_req */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Schedule the automatic scan stop */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1583
/* Cancel a running LE scan.  Returns -EALREADY when no scan is
 * active.  The disable command is only sent here if the pending
 * le_scan_disable work could still be cancelled; otherwise that work
 * is already running (or ran) and sends the command itself. */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));	/* cp.enable stays 0 */
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1601
/* Delayed work that stops an LE scan once its timeout expires: sends
 * LE Set Scan Enable with an all-zero parameter block (enable = 0). */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1614
Andre Guedes28b75a82012-02-03 17:48:00 -03001615static void le_scan_work(struct work_struct *work)
1616{
1617 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1618 struct le_scan_params *param = &hdev->le_scan_params;
1619
1620 BT_DBG("%s", hdev->name);
1621
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001622 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1623 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001624}
1625
1626int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001627 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001628{
1629 struct le_scan_params *param = &hdev->le_scan_params;
1630
1631 BT_DBG("%s", hdev->name);
1632
1633 if (work_busy(&hdev->le_scan))
1634 return -EINPROGRESS;
1635
1636 param->type = type;
1637 param->interval = interval;
1638 param->window = window;
1639 param->timeout = timeout;
1640
1641 queue_work(system_long_wq, &hdev->le_scan);
1642
1643 return 0;
1644}
1645
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	/* Zero-allocated, so every field not touched below starts 0/NULL */
	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	/* NOTE(review): presumably expressed in baseband slots (0.625 ms
	 * each, i.e. 500 ms / 50 ms) — confirm against the HCI spec. */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	/* RX/CMD/TX processing plus power-on and LE scan work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1699
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any frames the driver queued before registration */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1709
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head, *p;
	int id, error;

	/* A usable driver must provide at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	write_lock(&hci_dev_list_lock);

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
	head = &hci_dev_list;

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		int nid = list_entry(p, struct hci_dev, list)->id;
		if (nid > id)
			break;
		if (nid == id)
			id++;
		head = p;	/* keep the list sorted by id: insert after here */
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	list_add(&hdev->list, head);

	write_unlock(&hci_dev_list_lock);

	/* Per-device single-threaded workqueue for rx/tx/cmd processing */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is non-fatal: the controller just
	 * works without an rfkill switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);	/* reference dropped in hci_unregister_dev() */

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion performed above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1786
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Flag unregistration in progress so concurrent users back off */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Tell mgmt the index is gone — skipped while the device is
	 * still initializing or in setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Flush stored keys and remote data under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1838
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1846
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1854
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Frames are only accepted while the device is up or initializing;
	 * otherwise the skb is consumed and dropped. */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue for the RX work item running on the device workqueue */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1877
/* Reassemble a byte stream into complete HCI frames of @type, using the
 * per-slot buffer hdev->reassembly[@index].
 *
 * Returns the number of input bytes NOT yet consumed (>= 0) or a
 * negative error (-EILSEQ for bad type/index, -ENOMEM on allocation
 * failure or oversized payload).  A completed frame is handed off to
 * hci_recv_frame(), which takes ownership of the skb.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* New frame: allocate the largest buffer this packet type
		 * may need and expect the header bytes first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;	/* bytes still missing */
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the full header has arrived, read the payload
		 * length from it and check it fits in the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1986
Marcel Holtmannef222012007-07-11 06:42:04 +02001987int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1988{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301989 int rem = 0;
1990
Marcel Holtmannef222012007-07-11 06:42:04 +02001991 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1992 return -EILSEQ;
1993
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001994 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001995 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301996 if (rem < 0)
1997 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001998
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301999 data += (count - rem);
2000 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002001 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002002
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302003 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002004}
2005EXPORT_SYMBOL(hci_recv_fragment);
2006
Suraj Sumangala99811512010-07-14 13:02:19 +05302007#define STREAM_REASSEMBLY 0
2008
2009int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2010{
2011 int type;
2012 int rem = 0;
2013
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002014 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302015 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2016
2017 if (!skb) {
2018 struct { char type; } *pkt;
2019
2020 /* Start of the frame */
2021 pkt = data;
2022 type = pkt->type;
2023
2024 data++;
2025 count--;
2026 } else
2027 type = bt_cb(skb)->pkt_type;
2028
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002029 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002030 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302031 if (rem < 0)
2032 return rem;
2033
2034 data += (count - rem);
2035 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002036 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302037
2038 return rem;
2039}
2040EXPORT_SYMBOL(hci_recv_stream_fragment);
2041
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042/* ---- Interface to upper protocols ---- */
2043
/* Register an upper-protocol callback structure.  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2055
/* Remove a previously registered upper-protocol callback structure.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2067
/* Hand one outgoing frame to the driver, after mirroring it to the
 * monitor channel and (in promiscuous mode) to raw HCI sockets.
 * Consumes the skb on the -ENODEV path; otherwise ownership passes to
 * the driver's send() callback, whose result is returned. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2095
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header: little-endian opcode + parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	/* Queue it; the cmd work item drains the queue */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
2132/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002133void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134{
2135 struct hci_command_hdr *hdr;
2136
2137 if (!hdev->sent_cmd)
2138 return NULL;
2139
2140 hdr = (void *) hdev->sent_cmd->data;
2141
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002142 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 return NULL;
2144
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002145 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146
2147 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2148}
2149
2150/* Send ACL data */
2151static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2152{
2153 struct hci_acl_hdr *hdr;
2154 int len = skb->len;
2155
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002156 skb_push(skb, HCI_ACL_HDR_SIZE);
2157 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002158 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002159 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2160 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161}
2162
/* Prepend ACL header(s) to @skb and queue it on @queue.  If the skb
 * carries a frag_list, each fragment is queued as its own ACL packet:
 * the head keeps @flags, the continuations get ACL_CONT instead of
 * ACL_START, and all of them are queued atomically under the queue
 * lock so the controller sees them contiguously. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear data; the fragments are
	 * handled individually below. */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2209
2210void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2211{
2212 struct hci_conn *conn = chan->conn;
2213 struct hci_dev *hdev = conn->hdev;
2214
2215 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2216
2217 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002218
2219 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002221 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222}
2223EXPORT_SYMBOL(hci_send_acl);
2224
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Header fields must be captured BEFORE skb_push() changes
	 * skb->len: dlen is the payload length only. */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue on the connection and kick the TX work item */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2247
2248/* ---- HCI TX task (outgoing data) ---- */
2249
/* HCI Connection scheduler */
/* Pick the connection of link @type that has data queued and the fewest
 * packets in flight, and compute its fair share of the controller's
 * available buffers into *@quote (at least 1).  *@quote is set to 0
 * when no eligible connection exists. */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Controller buffer budget for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* No dedicated LE buffers: fall back to ACL's */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2310
/* ACL TX timeout handler: disconnect every connection of link @type
 * that still has unacknowledged packets outstanding. */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			/* 0x13: Remote User Terminated Connection
			 * (Bluetooth Core Spec error code) */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2331
/* Channel-aware scheduler: among all channels of link @type that have
 * data queued, consider only those whose head skb carries the highest
 * priority seen, and of these pick the one on the connection with the
 * fewest packets in flight.  Computes the connection's buffer quota
 * into *@quote (at least 1).  Returns NULL — leaving *@quote untouched
 * — when nothing is queued. */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New highest priority: restart selection */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-sent connection wins within this priority */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Controller buffer budget for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffers: fall back to ACL's */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2410
/* Priority aging for connections of link @type: channels that sent in
 * the last round get their counter reset; channels that did not get
 * the head skb of their queue promoted to HCI_PRIO_MAX - 1 —
 * presumably so lower-priority traffic cannot be starved forever by
 * the priority-based selection in hci_chan_sent(). */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got serviced: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2460
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002461static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2462{
2463 /* Calculate count of blocks used by this packet */
2464 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2465}
2466
/* Declare an ACL transport timeout when the controller has returned no
 * credits (@cnt == 0) for longer than HCI_ACL_TX_TIMEOUT since the last
 * ACL transmission.  Skipped entirely while the device is in raw mode.
 *
 * NOTE(review): despite the generic name this only ever checks the ACL
 * link; callers pass either the packet-based or block-based credit count.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477
/* Packet-based ACL scheduler: while credits remain, repeatedly pick the
 * best channel (via hci_chan_sent) and drain up to its fair quote of
 * packets, stopping early if a lower-priority packet reaches the head
 * of its queue.  If anything was sent, rebalance starved channels.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head packet when the channel was
		 * elected; only packets at least this urgent are sent. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Kick the ACL link out of sniff/park before
			 * transmitting, if the packet requests it. */
			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Credits were consumed: promote packets of starved channels. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2515
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002516static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002517{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002518 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002519 struct hci_chan *chan;
2520 struct sk_buff *skb;
2521 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002522
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002523 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002524
2525 while (hdev->block_cnt > 0 &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002526 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002527 u32 priority = (skb_peek(&chan->data_q))->priority;
2528 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2529 int blocks;
2530
2531 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002532 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002533
2534 /* Stop if priority has changed */
2535 if (skb->priority < priority)
2536 break;
2537
2538 skb = skb_dequeue(&chan->data_q);
2539
2540 blocks = __get_blocks(hdev, skb);
2541 if (blocks > hdev->block_cnt)
2542 return;
2543
2544 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002545 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002546
2547 hci_send_frame(skb);
2548 hdev->acl_last_tx = jiffies;
2549
2550 hdev->block_cnt -= blocks;
2551 quote -= blocks;
2552
2553 chan->sent += blocks;
2554 chan->conn->sent += blocks;
2555 }
2556 }
2557
2558 if (cnt != hdev->block_cnt)
2559 hci_prio_recalculate(hdev, ACL_LINK);
2560}
2561
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002562static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002563{
2564 BT_DBG("%s", hdev->name);
2565
2566 if (!hci_conn_num(hdev, ACL_LINK))
2567 return;
2568
2569 switch (hdev->flow_ctl_mode) {
2570 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2571 hci_sched_acl_pkt(hdev);
2572 break;
2573
2574 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2575 hci_sched_acl_blk(hdev);
2576 break;
2577 }
2578}
2579
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002581static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582{
2583 struct hci_conn *conn;
2584 struct sk_buff *skb;
2585 int quote;
2586
2587 BT_DBG("%s", hdev->name);
2588
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002589 if (!hci_conn_num(hdev, SCO_LINK))
2590 return;
2591
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2593 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2594 BT_DBG("skb %p len %d", skb, skb->len);
2595 hci_send_frame(skb);
2596
2597 conn->sent++;
2598 if (conn->sent == ~0)
2599 conn->sent = 0;
2600 }
2601 }
2602}
2603
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002604static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002605{
2606 struct hci_conn *conn;
2607 struct sk_buff *skb;
2608 int quote;
2609
2610 BT_DBG("%s", hdev->name);
2611
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002612 if (!hci_conn_num(hdev, ESCO_LINK))
2613 return;
2614
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002615 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2616 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002617 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2618 BT_DBG("skb %p len %d", skb, skb->len);
2619 hci_send_frame(skb);
2620
2621 conn->sent++;
2622 if (conn->sent == ~0)
2623 conn->sent = 0;
2624 }
2625 }
2626}
2627
/* LE scheduler: same channel-election loop as the ACL packet scheduler,
 * but LE may either have its own credit pool (le_cnt/le_pkts) or, on
 * controllers without dedicated LE buffers, borrow from the ACL pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the LE pool if the controller advertises one, otherwise
	 * share the ACL pool; remember the start value to detect sends. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool we used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Credits were consumed: promote packets of starved channels. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2678
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002679static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002681 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 struct sk_buff *skb;
2683
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002684 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002685 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686
2687 /* Schedule queues and send stuff to HCI driver */
2688
2689 hci_sched_acl(hdev);
2690
2691 hci_sched_sco(hdev);
2692
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002693 hci_sched_esco(hdev);
2694
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002695 hci_sched_le(hdev);
2696
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 /* Send next queued raw (unknown type) packet */
2698 while ((skb = skb_dequeue(&hdev->raw_q)))
2699 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700}
2701
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field also carries the packet-boundary and
	 * broadcast flag bits; split them apart. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* First traffic on a mgmt-managed connection: report
		 * device_connected exactly once (guarded by the
		 * test_and_set_bit on HCI_CONN_MGMT_CONNECTED). */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol; L2CAP takes skb ownership. */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2747
2748/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002749static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750{
2751 struct hci_sco_hdr *hdr = (void *) skb->data;
2752 struct hci_conn *conn;
2753 __u16 handle;
2754
2755 skb_pull(skb, HCI_SCO_HDR_SIZE);
2756
2757 handle = __le16_to_cpu(hdr->handle);
2758
2759 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2760
2761 hdev->stat.sco_rx++;
2762
2763 hci_dev_lock(hdev);
2764 conn = hci_conn_hash_lookup_handle(hdev, handle);
2765 hci_dev_unlock(hdev);
2766
2767 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002769 sco_recv_scodata(conn, skb);
2770 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002772 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002773 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 }
2775
2776 kfree_skb(skb);
2777}
2778
/* RX work handler: drain the device receive queue, mirror every frame
 * to the monitor/socket taps, filter according to device state, then
 * demultiplex by packet type to the event/ACL/SCO handlers.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode userspace owns the device; the stack
		 * does not process frames itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it. */
			kfree_skb(skb);
			break;
		}
	}
}
2833
/* Command work handler: send the next queued HCI command when the
 * controller has command credits (cmd_cnt).  A clone of the outgoing
 * command is kept in hdev->sent_cmd so its completion event can be
 * matched later; a timer guards against the controller never replying.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command before replacing it. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset no completion is expected, so stop
			 * the timeout; otherwise (re)arm it. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002864
2865int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2866{
2867 /* General inquiry access code (GIAC) */
2868 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2869 struct hci_cp_inquiry cp;
2870
2871 BT_DBG("%s", hdev->name);
2872
2873 if (test_bit(HCI_INQUIRY, &hdev->flags))
2874 return -EINPROGRESS;
2875
Johan Hedberg46632622012-01-02 16:06:08 +02002876 inquiry_cache_flush(hdev);
2877
Andre Guedes2519a1f2011-11-07 11:45:24 -03002878 memset(&cp, 0, sizeof(cp));
2879 memcpy(&cp.lap, lap, sizeof(cp.lap));
2880 cp.length = length;
2881
2882 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2883}
Andre Guedes023d50492011-11-04 14:16:52 -03002884
2885int hci_cancel_inquiry(struct hci_dev *hdev)
2886{
2887 BT_DBG("%s", hdev->name);
2888
2889 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002890 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002891
2892 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2893}
Andre Guedes31f79562012-04-24 21:02:53 -03002894
2895u8 bdaddr_to_le(u8 bdaddr_type)
2896{
2897 switch (bdaddr_type) {
2898 case BDADDR_LE_PUBLIC:
2899 return ADDR_LE_DEV_PUBLIC;
2900
2901 default:
2902 /* Fallback to LE Random address type */
2903 return ADDR_LE_DEV_RANDOM;
2904 }
2905}