blob: cc3d164f56fb50c0bfbce6b2b648cbde9190c652 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
/* Delay in msec used for the automatic power-off timer.
 * NOTE(review): its users are outside this chunk — confirm against the
 * power management code elsewhere in this file. */
#define AUTO_OFF_TIMEOUT	2000

/* Work handlers for the RX, command and TX paths; defined later in
 * this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list, guarded by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, guarded by hci_cb_list_lock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Forward a device event (HCI_DEV_*) to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
/* Called when an HCI command completes: wake the synchronous request
 * waiting on it, handling init-phase command tracking first.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a clone of the last sent command at the head of
		 * the command queue and kick the command work to resend it. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Complete the pending synchronous request (see __hci_request). */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
/* Execute request and wait for completion.
 * @req: callback that issues the HCI command(s); completion is signalled
 *       via hci_req_complete()/hci_req_cancel().
 * Returns 0 on success, a negative errno on failure, cancel or timeout.
 * Caller must hold the request lock (see hci_request()). */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue and change task state BEFORE issuing
	 * the request, so the completion wake-up cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds the positive errno from hci_req_cancel */
		err = -hdev->req_result;
		break;

	default:
		/* Neither completed nor canceled within the timeout */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100170 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171{
172 int ret;
173
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
/* Request callback: issue an HCI Reset.  HCI_RESET marks the reset as
 * in flight; it must be set before the command is sent. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
193
/* Issue the BR/EDR controller init command sequence (reset, capability
 * reads, event filter and timeout defaults).  Completions are tracked
 * via hci_req_complete(); the order of commands below is deliberate. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that quirk it away) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Flush all stored link keys on the controller */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
245
/* Issue the minimal init sequence for an AMP controller. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
256
/* Request callback run during device bring-up: flush any driver-queued
 * special commands onto the command queue, then run the init sequence
 * matching the controller type. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open() finished */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
290
/* Request callback for LE-capable controllers; opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
298
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 scan = opt;
302
303 BT_DBG("%s %x", hdev->name, scan);
304
305 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
309static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 auth = opt;
312
313 BT_DBG("%s %x", hdev->name, auth);
314
315 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
319static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 encrypt = opt;
322
323 BT_DBG("%s %x", hdev->name, encrypt);
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327}
328
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200329static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __le16 policy = cpu_to_le16(opt);
332
Marcel Holtmanna418b892008-11-30 12:17:28 +0100333 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200334
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337}
338
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900339/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 * Device is held on return. */
341struct hci_dev *hci_dev_get(int index)
342{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200343 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344
345 BT_DBG("%d", index);
346
347 if (index < 0)
348 return NULL;
349
350 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200351 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
354 break;
355 }
356 }
357 read_unlock(&hci_dev_list_lock);
358 return hdev;
359}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360
361/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200362
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200363bool hci_discovery_active(struct hci_dev *hdev)
364{
365 struct discovery_state *discov = &hdev->discovery;
366
Andre Guedes6fbe1952012-02-03 17:47:58 -0300367 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300368 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300369 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200370 return true;
371
Andre Guedes6fbe1952012-02-03 17:47:58 -0300372 default:
373 return false;
374 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200375}
376
/* Transition the discovery state machine, emitting mgmt "discovering"
 * events on the edges user space cares about.  No-op if the state is
 * unchanged. */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never really began,
		 * so no "stopped discovering" event is sent. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		hdev->discovery.type = 0;
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		/* Tell user space discovery is now in progress */
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
403
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404static void inquiry_cache_flush(struct hci_dev *hdev)
405{
Johan Hedberg30883512012-01-04 14:16:21 +0200406 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200407 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408
Johan Hedberg561aafb2012-01-04 13:31:59 +0200409 list_for_each_entry_safe(p, n, &cache->all, all) {
410 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200411 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200413
414 INIT_LIST_HEAD(&cache->unknown);
415 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416}
417
418struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
419{
Johan Hedberg30883512012-01-04 14:16:21 +0200420 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 struct inquiry_entry *e;
422
423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
Johan Hedberg561aafb2012-01-04 13:31:59 +0200425 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200427 return e;
428 }
429
430 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431}
432
Johan Hedberg561aafb2012-01-04 13:31:59 +0200433struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300434 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200435{
Johan Hedberg30883512012-01-04 14:16:21 +0200436 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440
441 list_for_each_entry(e, &cache->unknown, list) {
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447}
448
/* Look up an entry on the name-resolve list.  Passing BDADDR_ANY acts
 * as a wildcard: the first entry whose name_state equals @state is
 * returned.  Otherwise the entry matching @bdaddr is returned.
 * NULL if nothing matches.  Caller must hold the device lock. */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
467
/* Re-insert @ie into the resolve list at its sorted position after its
 * RSSI changed.  The list is kept ordered by ascending |RSSI| (i.e.
 * strongest signal first); entries whose name resolution is already
 * pending are not displaced.  Caller must hold the device lock. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk forward past entries with smaller |RSSI|; stop at the first
	 * non-pending entry that should come after ie. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
486
/* Insert or refresh an inquiry cache entry from fresh inquiry @data.
 * @name_known: caller already knows the remote name.
 * @ssp: out-parameter set to the remote's SSP support (sticky true if a
 *       cached entry already reported SSP); may be NULL.
 * Returns true if the entry's name is known or pending (no name request
 * needed), false if the name still has to be resolved or allocation
 * failed.  Caller must hold the device lock. */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Once a device reported SSP support, keep reporting it */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI change affects the resolve-list ordering */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the unknown list */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
542
543static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
544{
Johan Hedberg30883512012-01-04 14:16:21 +0200545 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 struct inquiry_info *info = (struct inquiry_info *) buf;
547 struct inquiry_entry *e;
548 int copied = 0;
549
Johan Hedberg561aafb2012-01-04 13:31:59 +0200550 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200552
553 if (copied >= num)
554 break;
555
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556 bacpy(&info->bdaddr, &data->bdaddr);
557 info->pscan_rep_mode = data->pscan_rep_mode;
558 info->pscan_period_mode = data->pscan_period_mode;
559 info->pscan_mode = data->pscan_mode;
560 memcpy(info->dev_class, data->dev_class, 3);
561 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200562
Linus Torvalds1da177e2005-04-16 15:20:36 -0700563 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200564 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565 }
566
567 BT_DBG("cache %p, copied %d", cache, copied);
568 return copied;
569}
570
571static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
572{
573 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
574 struct hci_cp_inquiry cp;
575
576 BT_DBG("%s", hdev->name);
577
578 if (test_bit(HCI_INQUIRY, &hdev->flags))
579 return;
580
581 /* Start Inquiry */
582 memcpy(&cp.lap, &ir->lap, 3);
583 cp.length = ir->length;
584 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200585 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586}
587
/* HCIINQUIRY ioctl handler: run an inquiry (unless the cache is fresh
 * and IREQ_CACHE_FLUSH is not set) and copy the cached results back to
 * user space following the updated hci_inquiry_req header.
 * Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Flush the cache and run a fresh inquiry if it is stale, empty,
	 * or the caller explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; allow ~2s of jiffies per unit */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with the real num_rsp) then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
653
654/* ---- HCI ioctl helpers ---- */
655
/* Bring an HCI device up: open the transport, run the controller init
 * sequence (unless the device is raw) and announce HCI_DEV_UP.  On
 * init failure everything opened here is torn down again.
 * Returns 0 on success or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		/* Controller init sequence; HCI_INIT gates the special
		 * command tracking in hci_req_complete(). */
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt is informed elsewhere */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
747
/* Take an HCI device down: cancel pending work and requests, flush all
 * queues and caches, optionally reset the controller, then close the
 * transport.  The teardown order below is deliberate.  Returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Not up: only the command timer can still be pending */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* AUTO_OFF means mgmt already considers the device powered down */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Forget controller identity data cached from the last init */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
835
/* ioctl entry point: bring the given HCI device down.
 *
 * Looks up the device by index, cancels any pending auto-power-off
 * work first (so it cannot race with the explicit close), then does
 * the actual shutdown via hci_dev_do_close().
 *
 * Returns 0 on success, -ENODEV if no such device, or the error from
 * hci_dev_do_close().
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* If auto-off was armed, this close supersedes it */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
853
/* ioctl entry point: soft-reset a running HCI device.
 *
 * Under the request lock, drops the RX/command queues, flushes the
 * inquiry cache and connection hash (under the hdev lock), calls the
 * driver's flush hook, resets the flow-control counters and, unless
 * the device is in raw mode, issues an HCI Reset to the controller.
 *
 * Returns 0 on success (or when the device is not up), -ENODEV if no
 * such device, or the error from __hci_request().
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again and clear packet credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
892
893int hci_dev_reset_stat(__u16 dev)
894{
895 struct hci_dev *hdev;
896 int ret = 0;
897
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200898 hdev = hci_dev_get(dev);
899 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 return -ENODEV;
901
902 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
903
904 hci_dev_put(hdev);
905
906 return ret;
907}
908
/* ioctl dispatcher for the HCISET* device-configuration commands.
 *
 * Copies a struct hci_dev_req from user space, resolves the device and
 * applies the requested setting; some settings are forwarded to the
 * controller via hci_request(), others only update host-side state.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV,
 * -EOPNOTSUPP, -EINVAL, or an hci_request() error).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Host-side only: mask to the supported link mode bits */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: [0] = packet count,
		 * [1] = MTU (host byte order; set by a trusted ioctl) */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
983
/* ioctl entry point: copy the list of registered HCI devices
 * (id + flags pairs) to user space.
 *
 * The caller passes the maximum number of entries in the first __u16
 * of @arg; the reply is a struct hci_dev_list_req sized to the number
 * of devices actually reported.
 *
 * Returns 0 on success, -EFAULT on copy errors, -EINVAL for a zero or
 * oversized request, -ENOMEM on allocation failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kzalloc below stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* A legacy-API listing counts as activity: disarm auto-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed through mgmt default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1030
/* ioctl entry point: fill a struct hci_dev_info for one device.
 *
 * The device index is read from the structure copied in from @arg;
 * the same structure is copied back populated with address, type,
 * flags, MTUs, link policy/mode, feature bits and statistics.
 *
 * Returns 0 on success, -EFAULT on copy errors, -ENODEV for a bad
 * index.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A legacy-API query counts as activity: disarm auto-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed through mgmt default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1072
1073/* ---- Interface to HCI drivers ---- */
1074
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001075static int hci_rfkill_set_block(void *data, bool blocked)
1076{
1077 struct hci_dev *hdev = data;
1078
1079 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1080
1081 if (!blocked)
1082 return 0;
1083
1084 hci_dev_do_close(hdev);
1085
1086 return 0;
1087}
1088
/* rfkill operations: only blocking is acted upon (see
 * hci_rfkill_set_block). */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1092
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093/* Alloc HCI device */
1094struct hci_dev *hci_alloc_dev(void)
1095{
1096 struct hci_dev *hdev;
1097
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001098 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 if (!hdev)
1100 return NULL;
1101
David Herrmann0ac7e702011-10-08 14:58:47 +02001102 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103 skb_queue_head_init(&hdev->driver_init);
1104
1105 return hdev;
1106}
1107EXPORT_SYMBOL(hci_alloc_dev);
1108
1109/* Free HCI device */
1110void hci_free_dev(struct hci_dev *hdev)
1111{
1112 skb_queue_purge(&hdev->driver_init);
1113
Marcel Holtmanna91f2e32006-07-03 10:02:41 +02001114 /* will free via device release */
1115 put_device(&hdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116}
1117EXPORT_SYMBOL(hci_free_dev);
1118
/* Deferred work: power on a device (e.g. triggered through mgmt).
 *
 * Opens the device; on success, arms the auto-power-off timer when
 * HCI_AUTO_OFF is set, and announces the controller to mgmt once the
 * initial HCI_SETUP phase completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1135
1136static void hci_power_off(struct work_struct *work)
1137{
Johan Hedberg32435532011-11-07 22:16:04 +02001138 struct hci_dev *hdev = container_of(work, struct hci_dev,
1139 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001140
1141 BT_DBG("%s", hdev->name);
1142
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001143 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001144}
1145
/* Deferred work: end a time-limited discoverable period.
 *
 * Writes the scan enable back to page-scan-only (dropping inquiry
 * scan) and clears the stored discoverable timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1163
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001164int hci_uuids_clear(struct hci_dev *hdev)
1165{
1166 struct list_head *p, *n;
1167
1168 list_for_each_safe(p, n, &hdev->uuids) {
1169 struct bt_uuid *uuid;
1170
1171 uuid = list_entry(p, struct bt_uuid, list);
1172
1173 list_del(p);
1174 kfree(uuid);
1175 }
1176
1177 return 0;
1178}
1179
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001180int hci_link_keys_clear(struct hci_dev *hdev)
1181{
1182 struct list_head *p, *n;
1183
1184 list_for_each_safe(p, n, &hdev->link_keys) {
1185 struct link_key *key;
1186
1187 key = list_entry(p, struct link_key, list);
1188
1189 list_del(p);
1190 kfree(key);
1191 }
1192
1193 return 0;
1194}
1195
/* Remove and free all SMP long term keys stored on @hdev.
 * Always returns 0.
 */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1207
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001208struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1209{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001210 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001211
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001212 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001213 if (bacmp(bdaddr, &k->bdaddr) == 0)
1214 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001215
1216 return NULL;
1217}
1218
/* Decide whether a new link key should be stored persistently (true)
 * or kept only for the current connection (false), based on the key
 * type and both sides' bonding requirements.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
					u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1254
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001255struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001256{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001257 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001258
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001259 list_for_each_entry(k, &hdev->long_term_keys, list) {
1260 if (k->ediv != ediv ||
1261 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001262 continue;
1263
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001264 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001265 }
1266
1267 return NULL;
1268}
1269EXPORT_SYMBOL(hci_find_ltk);
1270
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001271struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001272 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001273{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001274 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001275
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001276 list_for_each_entry(k, &hdev->long_term_keys, list)
1277 if (addr_type == k->bdaddr_type &&
1278 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001279 return k;
1280
1281 return NULL;
1282}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001283EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001284
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL (security mode 3 style key delivery). @new_key is
 * non-zero when the key came from a fresh pairing, in which case mgmt
 * is notified and the key's persistence is evaluated.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if we have one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1338
/* Store (or update) an LE SMP key (STK or LTK) for @bdaddr/@addr_type.
 *
 * Keys whose @type is neither STK nor LTK are silently ignored. When
 * @new_key is set and the key is an LTK, mgmt is notified.
 *
 * Returns 0 on success (including the ignored-type case) or -ENOMEM
 * if a new entry cannot be allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if we have one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1375
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001376int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1377{
1378 struct link_key *key;
1379
1380 key = hci_find_link_key(hdev, bdaddr);
1381 if (!key)
1382 return -ENOENT;
1383
1384 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1385
1386 list_del(&key->list);
1387 kfree(key);
1388
1389 return 0;
1390}
1391
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001392int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1393{
1394 struct smp_ltk *k, *tmp;
1395
1396 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1397 if (bacmp(bdaddr, &k->bdaddr))
1398 continue;
1399
1400 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1401
1402 list_del(&k->list);
1403 kfree(k);
1404 }
1405
1406 return 0;
1407}
1408
/* HCI command timer function */
/* Fires when the controller failed to answer the last HCI command in
 * time. Restores the command credit so the command queue is unblocked
 * and kicks the command work.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1418
Szymon Janc2763eda2011-03-22 13:12:22 +01001419struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001420 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001421{
1422 struct oob_data *data;
1423
1424 list_for_each_entry(data, &hdev->remote_oob_data, list)
1425 if (bacmp(bdaddr, &data->bdaddr) == 0)
1426 return data;
1427
1428 return NULL;
1429}
1430
1431int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1432{
1433 struct oob_data *data;
1434
1435 data = hci_find_remote_oob_data(hdev, bdaddr);
1436 if (!data)
1437 return -ENOENT;
1438
1439 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1440
1441 list_del(&data->list);
1442 kfree(data);
1443
1444 return 0;
1445}
1446
1447int hci_remote_oob_data_clear(struct hci_dev *hdev)
1448{
1449 struct oob_data *data, *n;
1450
1451 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1452 list_del(&data->list);
1453 kfree(data);
1454 }
1455
1456 return 0;
1457}
1458
/* Store (or update) remote out-of-band pairing data (hash and
 * randomizer) for @bdaddr.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
				u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	/* Allocate a fresh entry only when this address is new */
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1482
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001483struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001484{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001485 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001486
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001487 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001488 if (bacmp(bdaddr, &b->bdaddr) == 0)
1489 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001490
1491 return NULL;
1492}
1493
1494int hci_blacklist_clear(struct hci_dev *hdev)
1495{
1496 struct list_head *p, *n;
1497
1498 list_for_each_safe(p, n, &hdev->blacklist) {
1499 struct bdaddr_list *b;
1500
1501 b = list_entry(p, struct bdaddr_list, list);
1502
1503 list_del(p);
1504 kfree(b);
1505 }
1506
1507 return 0;
1508}
1509
/* Add @bdaddr to the device's blacklist and notify mgmt.
 *
 * @type is only forwarded in the mgmt notification; it is not stored
 * in the blacklist entry itself.
 *
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already blacklisted,
 * -ENOMEM on allocation failure, otherwise the mgmt_device_blocked()
 * result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1530
/* Remove @bdaddr from the device's blacklist and notify mgmt.
 *
 * BDADDR_ANY is a wildcard: it clears the entire blacklist (without a
 * mgmt notification). @type is only forwarded in the notification.
 *
 * Returns -ENOENT if the address is not blacklisted, 0 from a full
 * clear, otherwise the mgmt_device_unblocked() result.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1547
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001548static void hci_clear_adv_cache(struct work_struct *work)
Andre Guedes35815082011-05-26 16:23:53 -03001549{
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001550 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001551 adv_work.work);
Andre Guedes35815082011-05-26 16:23:53 -03001552
1553 hci_dev_lock(hdev);
1554
1555 hci_adv_entries_clear(hdev);
1556
1557 hci_dev_unlock(hdev);
1558}
1559
Andre Guedes76c86862011-05-26 16:23:50 -03001560int hci_adv_entries_clear(struct hci_dev *hdev)
1561{
1562 struct adv_entry *entry, *tmp;
1563
1564 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1565 list_del(&entry->list);
1566 kfree(entry);
1567 }
1568
1569 BT_DBG("%s adv cache cleared", hdev->name);
1570
1571 return 0;
1572}
1573
1574struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1575{
1576 struct adv_entry *entry;
1577
1578 list_for_each_entry(entry, &hdev->adv_entries, list)
1579 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1580 return entry;
1581
1582 return NULL;
1583}
1584
1585static inline int is_connectable_adv(u8 evt_type)
1586{
1587 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1588 return 1;
1589
1590 return 0;
1591}
1592
/* Cache the address/type of a connectable LE advertising report.
 *
 * Non-connectable event types are rejected with -EINVAL; an already
 * cached address is left untouched (returns 0).
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
				struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
		batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1616
/* hci_request callback: program the controller's LE scan parameters
 * (scan type, interval and window) from the le_scan_params passed via
 * @opt.
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1629
1630static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1631{
1632 struct hci_cp_le_set_scan_enable cp;
1633
1634 memset(&cp, 0, sizeof(cp));
1635 cp.enable = 1;
1636
1637 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1638}
1639
/* Start an LE scan with the given parameters and schedule its
 * automatic stop after @timeout milliseconds.
 *
 * The parameter and enable requests are serialized under the request
 * lock, each bounded by a 3 second request timeout.
 *
 * Returns 0 on success, -EINPROGRESS if a scan is already running, or
 * a negative __hci_request() error.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
				u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	/* param lives on the stack; __hci_request completes before return */
	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arm the deferred disable that ends this scan */
	schedule_delayed_work(&hdev->le_scan_disable,
				msecs_to_jiffies(timeout));

	return 0;
}
1673
/* Cancel an active LE scan.
 *
 * Returns -EALREADY when no scan is running.  If the delayed disable
 * work was still pending, it is cancelled and the disable command (a
 * zeroed Set Scan Enable, i.e. enable == 0) is sent immediately; if the
 * work already started, it will send the disable command itself.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1691
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001692static void le_scan_disable_work(struct work_struct *work)
1693{
1694 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001695 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001696 struct hci_cp_le_set_scan_enable cp;
1697
1698 BT_DBG("%s", hdev->name);
1699
1700 memset(&cp, 0, sizeof(cp));
1701
1702 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1703}
1704
Andre Guedes28b75a82012-02-03 17:48:00 -03001705static void le_scan_work(struct work_struct *work)
1706{
1707 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1708 struct le_scan_params *param = &hdev->le_scan_params;
1709
1710 BT_DBG("%s", hdev->name);
1711
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001712 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1713 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001714}
1715
1716int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001717 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001718{
1719 struct le_scan_params *param = &hdev->le_scan_params;
1720
1721 BT_DBG("%s", hdev->name);
1722
1723 if (work_busy(&hdev->le_scan))
1724 return -EINPROGRESS;
1725
1726 param->type = type;
1727 param->interval = interval;
1728 param->window = window;
1729 param->timeout = timeout;
1730
1731 queue_work(system_long_wq, &hdev->le_scan);
1732
1733 return 0;
1734}
1735
/* Register HCI device.
 *
 * Picks the first free index (AMP controllers start at 1 so that the
 * index doubles as the AMP controller ID), names the device "hci<id>",
 * links it into hci_dev_list, initializes all per-device state (locks,
 * work items, queues, timers, lists), creates the per-device workqueue
 * and sysfs entries, and kicks off the initial power-on work.
 *
 * Returns the assigned id (>= 0) on success or a negative errno; on
 * failure the device is removed from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* The transport driver must provide at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id: walk the (id-ordered) list
	 * until a gap appears; 'head' trails the insertion point. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue for this device's work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1867
/* Unregister HCI device.
 *
 * Marks the device HCI_UNREGISTER, unlinks it from hci_dev_list, shuts
 * the device down, frees pending reassembly buffers, notifies mgmt (if
 * setup had completed), tears down rfkill/sysfs/workqueue and clears
 * all per-device lists, finally dropping the reference taken at
 * registration time.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Tell mgmt only for devices that finished setup; devices still
	 * in HCI_INIT/HCI_SETUP were never announced to userspace. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1922
/* Suspend HCI device: only broadcasts HCI_DEV_SUSPEND to registered
 * notifiers; always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1930
/* Resume HCI device: only broadcasts HCI_DEV_RESUME to registered
 * notifiers; always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1938
/* Receive frame from HCI drivers.
 *
 * Consumes @skb in all cases.  The frame is dropped with -ENXIO unless
 * the owning device (stored in skb->dev by the driver) exists and is
 * either up or still initializing.  Accepted frames are marked as
 * incoming, timestamped, and queued for the RX work item.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer processing to the per-device workqueue */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1961
/* Reassemble one HCI packet from a driver byte stream.
 *
 * Appends up to @count bytes from @data to the partial packet kept in
 * hdev->reassembly[@index], allocating a fresh skb (sized for the
 * packet type's maximum) when none is in progress.  Once the packet
 * header has been copied in, the expected payload length is read from
 * it; a completed packet is handed to hci_recv_frame() and the slot is
 * cleared.
 *
 * Returns the number of input bytes NOT consumed (0 means everything
 * was used; a positive value means a packet completed mid-buffer),
 * -EILSEQ for an invalid type/index, or -ENOMEM on allocation failure
 * or when the advertised payload would not fit the preallocated skb.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the
		 * worst case of this packet type and expect the
		 * fixed-size header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* When exactly the header has arrived, learn the real
		 * payload length from it. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2070
Marcel Holtmannef222012007-07-11 06:42:04 +02002071int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2072{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302073 int rem = 0;
2074
Marcel Holtmannef222012007-07-11 06:42:04 +02002075 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2076 return -EILSEQ;
2077
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002078 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002079 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302080 if (rem < 0)
2081 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002082
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302083 data += (count - rem);
2084 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002085 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002086
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302087 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002088}
2089EXPORT_SYMBOL(hci_recv_fragment);
2090
/* All stream input shares a single reassembly slot */
#define STREAM_REASSEMBLY 0

/* Reassemble packets from a byte stream where each packet is prefixed
 * by a one-byte packet-type indicator (e.g. UART/H4-style transports).
 *
 * When no packet is in progress the first byte of @data is consumed as
 * the type; otherwise the in-progress packet's type is reused.  Loops
 * until the buffer is drained.  Returns the unconsumed byte count from
 * the last hci_reassembly() call, or a negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2125
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126/* ---- Interface to upper protocols ---- */
2127
/* Register an upper-protocol callback structure on the global
 * hci_cb_list, under the list's write lock.  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2139
/* Remove a previously registered callback structure from hci_cb_list,
 * under the list's write lock.  Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2151
/* Hand one outgoing frame to the transport driver.
 *
 * Timestamps the skb, taps a copy to the monitor channel and (when a
 * promiscuous listener exists) to the raw HCI sockets, detaches the
 * skb from its owning socket, then calls the driver's send().
 * Consumes @skb (frees it on the no-device error path, otherwise the
 * driver takes ownership).  Returns the driver's result or -ENODEV.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2179
/* Send HCI command.
 *
 * Builds a command packet (little-endian opcode header plus @plen bytes
 * of @param payload) and queues it on cmd_q for the command work item;
 * the actual transmission is asynchronous.  During HCI_INIT the opcode
 * is recorded as init_last_cmd for the init state machine.
 * Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	/* GFP_ATOMIC: callers may be in non-sleepable context */
	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215
2216/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002217void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218{
2219 struct hci_command_hdr *hdr;
2220
2221 if (!hdev->sent_cmd)
2222 return NULL;
2223
2224 hdr = (void *) hdev->sent_cmd->data;
2225
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002226 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 return NULL;
2228
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002229 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230
2231 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2232}
2233
2234/* Send ACL data */
2235static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2236{
2237 struct hci_acl_hdr *hdr;
2238 int len = skb->len;
2239
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002240 skb_push(skb, HCI_ACL_HDR_SIZE);
2241 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002242 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002243 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2244 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245}
2246
/* Queue an (possibly fragmented) ACL skb on a channel data queue.
 *
 * A non-fragmented skb (already carrying its ACL header, added by the
 * caller) is simply appended.  For a fragmented skb, the lead fragment
 * and its frag_list continuation fragments are appended atomically
 * under the queue lock; each continuation gets its own ACL header with
 * ACL_CONT substituted for ACL_START.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; fragments are queued individually */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2287
/* Send ACL data on a channel.
 *
 * Stamps the lead skb with the device, packet type and ACL header,
 * queues it (and any fragments) on the channel's data queue, and kicks
 * the TX work item to schedule transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2304
/* Send SCO data.
 *
 * Builds the SCO header (little-endian connection handle plus payload
 * length) on the stack, pushes it in front of the payload, tags the
 * skb with the device and packet type, then queues it on the
 * connection's data queue and kicks the TX work item.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2327
2328/* ---- HCI TX task (outgoing data) ---- */
2329
2330/* HCI Connection scheduler */
/* Pick the connection of the given link type with pending data and the
 * fewest in-flight packets (fairness by least-sent), walking the
 * connection hash under RCU.
 *
 * On success *quote is set to this connection's share of the available
 * controller buffers (at least 1); with no eligible connection *quote
 * is 0 and NULL is returned.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_mtu == 0) share the ACL pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2389
/* Handle a link-level TX timeout: disconnect every connection of the
 * given type that still has unacknowledged packets (c->sent != 0),
 * using HCI reason 0x13 (remote user terminated connection).  The
 * connection hash is walked under RCU.
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2410
/* Pick the next HCI channel to service for the given link type.
 *
 * Walks all connected/configuring connections of @type under RCU and,
 * among their channels with queued data, selects by (a) highest
 * priority of the head skb and (b) among equal priorities, the channel
 * whose connection has the fewest in-flight packets.  On success
 * *quote is set to the connection's share of the matching controller
 * buffer pool (at least 1).  Returns NULL when nothing is ready.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority resets the
			 * least-sent competition to this level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that matches the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Controllers without a dedicated LE buffer pool
		 * (le_mtu == 0) share the ACL pool */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2489
/* Anti-starvation pass run after a scheduling round that sent data.
 *
 * For every connected channel of the given link type: channels that were
 * served this round get their per-round counter reset; channels that were
 * NOT served but still have data queued get their head packet promoted to
 * HCI_PRIO_MAX - 1 so they win selection in a later round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Served this round: clear the counter, no promotion */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion priority */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once every connection of this type was visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2539
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002540static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2541{
2542 /* Calculate count of blocks used by this packet */
2543 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2544}
2545
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002546static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 if (!test_bit(HCI_RAW, &hdev->flags)) {
2549 /* ACL tx timeout must be longer than maximum
2550 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002551 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenkocc48dc02012-01-04 16:42:26 +02002552 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002553 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002555}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556
/* Packet-based ACL scheduler: while transmit credits remain, repeatedly
 * pick the best channel (hci_chan_sent) and drain up to its quota of
 * packets, stopping early if a lower-priority packet reaches the head of
 * the queue.  Ends with an anti-starvation pass if anything was sent.
 */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head packet when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit and one in-flight packet accounted */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Promote starved channels only if this round actually sent data */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2594
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002595static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2596{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002597 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002598 struct hci_chan *chan;
2599 struct sk_buff *skb;
2600 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002601
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002602 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002603
2604 while (hdev->block_cnt > 0 &&
2605 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2606 u32 priority = (skb_peek(&chan->data_q))->priority;
2607 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2608 int blocks;
2609
2610 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2611 skb->len, skb->priority);
2612
2613 /* Stop if priority has changed */
2614 if (skb->priority < priority)
2615 break;
2616
2617 skb = skb_dequeue(&chan->data_q);
2618
2619 blocks = __get_blocks(hdev, skb);
2620 if (blocks > hdev->block_cnt)
2621 return;
2622
2623 hci_conn_enter_active_mode(chan->conn,
2624 bt_cb(skb)->force_active);
2625
2626 hci_send_frame(skb);
2627 hdev->acl_last_tx = jiffies;
2628
2629 hdev->block_cnt -= blocks;
2630 quote -= blocks;
2631
2632 chan->sent += blocks;
2633 chan->conn->sent += blocks;
2634 }
2635 }
2636
2637 if (cnt != hdev->block_cnt)
2638 hci_prio_recalculate(hdev, ACL_LINK);
2639}
2640
2641static inline void hci_sched_acl(struct hci_dev *hdev)
2642{
2643 BT_DBG("%s", hdev->name);
2644
2645 if (!hci_conn_num(hdev, ACL_LINK))
2646 return;
2647
2648 switch (hdev->flow_ctl_mode) {
2649 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2650 hci_sched_acl_pkt(hdev);
2651 break;
2652
2653 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2654 hci_sched_acl_blk(hdev);
2655 break;
2656 }
2657}
2658
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659/* Schedule SCO */
2660static inline void hci_sched_sco(struct hci_dev *hdev)
2661{
2662 struct hci_conn *conn;
2663 struct sk_buff *skb;
2664 int quote;
2665
2666 BT_DBG("%s", hdev->name);
2667
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002668 if (!hci_conn_num(hdev, SCO_LINK))
2669 return;
2670
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2672 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2673 BT_DBG("skb %p len %d", skb, skb->len);
2674 hci_send_frame(skb);
2675
2676 conn->sent++;
2677 if (conn->sent == ~0)
2678 conn->sent = 0;
2679 }
2680 }
2681}
2682
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002683static inline void hci_sched_esco(struct hci_dev *hdev)
2684{
2685 struct hci_conn *conn;
2686 struct sk_buff *skb;
2687 int quote;
2688
2689 BT_DBG("%s", hdev->name);
2690
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002691 if (!hci_conn_num(hdev, ESCO_LINK))
2692 return;
2693
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002694 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2695 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2696 BT_DBG("skb %p len %d", skb, skb->len);
2697 hci_send_frame(skb);
2698
2699 conn->sent++;
2700 if (conn->sent == ~0)
2701 conn->sent = 0;
2702 }
2703 }
2704}
2705
/* LE scheduler: same channel-based draining as hci_sched_acl_pkt(), but
 * credits come from the LE buffer pool when the controller has one
 * (le_pkts != 0) and from the shared ACL pool otherwise.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE pool if present, else share the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head packet when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Promote starved channels only if this round actually sent data */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2756
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002757static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002759 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 struct sk_buff *skb;
2761
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002762 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2763 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
2765 /* Schedule queues and send stuff to HCI driver */
2766
2767 hci_sched_acl(hdev);
2768
2769 hci_sched_sco(hdev);
2770
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002771 hci_sched_esco(hdev);
2772
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002773 hci_sched_le(hdev);
2774
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775 /* Send next queued raw (unknown type) packet */
2776 while ((skb = skb_dequeue(&hdev->raw_q)))
2777 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778}
2779
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002780/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781
2782/* ACL data packet */
2783static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2784{
2785 struct hci_acl_hdr *hdr = (void *) skb->data;
2786 struct hci_conn *conn;
2787 __u16 handle, flags;
2788
2789 skb_pull(skb, HCI_ACL_HDR_SIZE);
2790
2791 handle = __le16_to_cpu(hdr->handle);
2792 flags = hci_flags(handle);
2793 handle = hci_handle(handle);
2794
2795 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2796
2797 hdev->stat.acl_rx++;
2798
2799 hci_dev_lock(hdev);
2800 conn = hci_conn_hash_lookup_handle(hdev, handle);
2801 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002802
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002804 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002805
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002807 l2cap_recv_acldata(conn, skb, flags);
2808 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002810 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 hdev->name, handle);
2812 }
2813
2814 kfree_skb(skb);
2815}
2816
2817/* SCO data packet */
2818static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2819{
2820 struct hci_sco_hdr *hdr = (void *) skb->data;
2821 struct hci_conn *conn;
2822 __u16 handle;
2823
2824 skb_pull(skb, HCI_SCO_HDR_SIZE);
2825
2826 handle = __le16_to_cpu(hdr->handle);
2827
2828 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2829
2830 hdev->stat.sco_rx++;
2831
2832 hci_dev_lock(hdev);
2833 conn = hci_conn_hash_lookup_handle(hdev, handle);
2834 hci_dev_unlock(hdev);
2835
2836 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002838 sco_recv_scodata(conn, skb);
2839 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002841 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 hdev->name, handle);
2843 }
2844
2845 kfree_skb(skb);
2846}
2847
/* Receive work item: drain the rx queue, mirroring each packet to the
 * monitor (and to raw sockets in promiscuous mode) before dispatching it
 * by type to the event/ACL/SCO handlers.  Data packets are discarded in
 * raw mode and during device initialization.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: userspace handles everything, kernel drops */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2902
/* Command work item: send the next queued HCI command if the controller
 * has a free command slot (cmd_cnt).  A clone of the command is kept in
 * hdev->sent_cmd for matching against the completion event, and the
 * command timer is armed to detect a non-responding controller.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command's reference copy */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No timeout while a reset is in flight; otherwise
			 * (re)arm the command watchdog */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (out of memory): put the command back
			 * and retry from the workqueue */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002933
2934int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2935{
2936 /* General inquiry access code (GIAC) */
2937 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2938 struct hci_cp_inquiry cp;
2939
2940 BT_DBG("%s", hdev->name);
2941
2942 if (test_bit(HCI_INQUIRY, &hdev->flags))
2943 return -EINPROGRESS;
2944
Johan Hedberg46632622012-01-02 16:06:08 +02002945 inquiry_cache_flush(hdev);
2946
Andre Guedes2519a1f2011-11-07 11:45:24 -03002947 memset(&cp, 0, sizeof(cp));
2948 memcpy(&cp.lap, lap, sizeof(cp.lap));
2949 cp.length = length;
2950
2951 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2952}
Andre Guedes023d50492011-11-04 14:16:52 -03002953
2954int hci_cancel_inquiry(struct hci_dev *hdev)
2955{
2956 BT_DBG("%s", hdev->name);
2957
2958 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002959 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002960
2961 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2962}