blob: 3431ec908c02dab65da0489f8f654c313d0dce79 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
32#include <net/bluetooth/bluetooth.h>
33#include <net/bluetooth/hci_core.h>
34
Johan Hedbergab81cbf2010-12-15 13:53:18 +020035#define AUTO_OFF_TIMEOUT 2000
36
Marcel Holtmannb78752c2010-08-08 23:06:53 -040037static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020038static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020039static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070040
Linus Torvalds1da177e2005-04-16 15:20:36 -070041/* HCI device list */
42LIST_HEAD(hci_dev_list);
43DEFINE_RWLOCK(hci_dev_list_lock);
44
45/* HCI callback list */
46LIST_HEAD(hci_cb_list);
47DEFINE_RWLOCK(hci_cb_list_lock);
48
Linus Torvalds1da177e2005-04-16 15:20:36 -070049/* ---- HCI notifications ---- */
50
/* Forward a device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to the HCI
 * socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
55
56/* ---- HCI requests ---- */
57
/* Called when an HCI command completes: wake up a waiter blocked in
 * __hci_request(), with special handling for out-of-order completions
 * during the init phase. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only the spontaneous-reset case (reset completed but a
		 * different command was pending) triggers the resend below. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Requeue a clone of the pending command at the head of the
		 * command queue so it is retransmitted first. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Normal path: record the result and wake the request waiter. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
95
96static void hci_req_cancel(struct hci_dev *hdev, int err)
97{
98 BT_DBG("%s err 0x%2.2x", hdev->name, err);
99
100 if (hdev->req_status == HCI_REQ_PEND) {
101 hdev->req_result = err;
102 hdev->req_status = HCI_REQ_CANCELED;
103 wake_up_interruptible(&hdev->req_wait_q);
104 }
105}
106
107/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Marks the request pending, queues itself on the request waitqueue,
 * fires @req, and sleeps up to @timeout jiffies for hci_req_complete()
 * or hci_req_cancel() to wake it. Caller must hold the request lock
 * (see hci_request()). Returns 0 on success or a negative errno. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Enqueue on the waitqueue BEFORE issuing the request so a fast
	 * completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* Interrupted by a signal: bail out without touching req_status. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno value. */
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the timeout expired. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
150
Gustavo Padovan6039aa72012-05-23 04:04:18 -0300151static int hci_request(struct hci_dev *hdev,
152 void (*req)(struct hci_dev *hdev, unsigned long opt),
153 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154{
155 int ret;
156
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200157 if (!test_bit(HCI_UP, &hdev->flags))
158 return -ENETDOWN;
159
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160 /* Serialize all requests */
161 hci_req_lock(hdev);
162 ret = __hci_request(hdev, req, opt, timeout);
163 hci_req_unlock(hdev);
164
165 return ret;
166}
167
/* Request callback: issue an HCI reset and flag it in hdev->flags. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
176
/* Queue the standard init command sequence for a BR/EDR controller.
 * Commands are fired asynchronously; completion is tracked through the
 * HCI_INIT request machinery (see hci_req_complete()). */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control (contrast
	 * with amp_init(), which selects block-based). */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset, unless the quirk says the controller must not be reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys (BDADDR_ANY + delete_all) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
228
/* Queue the init command sequence for an AMP controller. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control (contrast with
	 * bredr_init(), which selects packet-based). */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
242
243static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
244{
245 struct sk_buff *skb;
246
247 BT_DBG("%s %ld", hdev->name, opt);
248
249 /* Driver initialization */
250
251 /* Special commands */
252 while ((skb = skb_dequeue(&hdev->driver_init))) {
253 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
254 skb->dev = (void *) hdev;
255
256 skb_queue_tail(&hdev->cmd_q, skb);
257 queue_work(hdev->workqueue, &hdev->cmd_work);
258 }
259 skb_queue_purge(&hdev->driver_init);
260
261 switch (hdev->dev_type) {
262 case HCI_BREDR:
263 bredr_init(hdev);
264 break;
265
266 case HCI_AMP:
267 amp_init(hdev);
268 break;
269
270 default:
271 BT_ERR("Unknown device type %d", hdev->dev_type);
272 break;
273 }
274
275}
276
/* Request callback for LE-capable controllers, run from hci_dev_open()
 * after the main init request. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
284
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
286{
287 __u8 scan = opt;
288
289 BT_DBG("%s %x", hdev->name, scan);
290
291 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200292 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293}
294
295static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 auth = opt;
298
299 BT_DBG("%s %x", hdev->name, auth);
300
301 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200302 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303}
304
305static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 encrypt = opt;
308
309 BT_DBG("%s %x", hdev->name, encrypt);
310
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200311 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200312 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313}
314
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200315static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __le16 policy = cpu_to_le16(opt);
318
Marcel Holtmanna418b892008-11-30 12:17:28 +0100319 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200320
321 /* Default link policy */
322 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
323}
324
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900325/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326 * Device is held on return. */
327struct hci_dev *hci_dev_get(int index)
328{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200329 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330
331 BT_DBG("%d", index);
332
333 if (index < 0)
334 return NULL;
335
336 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200337 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 if (d->id == index) {
339 hdev = hci_dev_hold(d);
340 break;
341 }
342 }
343 read_unlock(&hci_dev_list_lock);
344 return hdev;
345}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346
347/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200348
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200349bool hci_discovery_active(struct hci_dev *hdev)
350{
351 struct discovery_state *discov = &hdev->discovery;
352
Andre Guedes6fbe1952012-02-03 17:47:58 -0300353 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300354 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300355 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200356 return true;
357
Andre Guedes6fbe1952012-02-03 17:47:58 -0300358 default:
359 return false;
360 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200361}
362
Johan Hedbergff9ef572012-01-04 14:23:45 +0200363void hci_discovery_set_state(struct hci_dev *hdev, int state)
364{
365 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
366
367 if (hdev->discovery.state == state)
368 return;
369
370 switch (state) {
371 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300372 if (hdev->discovery.state != DISCOVERY_STARTING)
373 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200374 break;
375 case DISCOVERY_STARTING:
376 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300377 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200378 mgmt_discovering(hdev, 1);
379 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200380 case DISCOVERY_RESOLVING:
381 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200382 case DISCOVERY_STOPPING:
383 break;
384 }
385
386 hdev->discovery.state = state;
387}
388
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389static void inquiry_cache_flush(struct hci_dev *hdev)
390{
Johan Hedberg30883512012-01-04 14:16:21 +0200391 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200392 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393
Johan Hedberg561aafb2012-01-04 13:31:59 +0200394 list_for_each_entry_safe(p, n, &cache->all, all) {
395 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200396 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200398
399 INIT_LIST_HEAD(&cache->unknown);
400 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401}
402
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300403struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
404 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405{
Johan Hedberg30883512012-01-04 14:16:21 +0200406 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 struct inquiry_entry *e;
408
409 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
410
Johan Hedberg561aafb2012-01-04 13:31:59 +0200411 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200413 return e;
414 }
415
416 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417}
418
Johan Hedberg561aafb2012-01-04 13:31:59 +0200419struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300420 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200421{
Johan Hedberg30883512012-01-04 14:16:21 +0200422 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200423 struct inquiry_entry *e;
424
425 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
426
427 list_for_each_entry(e, &cache->unknown, list) {
428 if (!bacmp(&e->data.bdaddr, bdaddr))
429 return e;
430 }
431
432 return NULL;
433}
434
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200435struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300436 bdaddr_t *bdaddr,
437 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200438{
439 struct discovery_state *cache = &hdev->discovery;
440 struct inquiry_entry *e;
441
442 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
443
444 list_for_each_entry(e, &cache->resolve, list) {
445 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
446 return e;
447 if (!bacmp(&e->data.bdaddr, bdaddr))
448 return e;
449 }
450
451 return NULL;
452}
453
/* Re-insert @ie at its sorted position on the resolve list after its
 * RSSI changed, so name resolution proceeds in the intended order. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; re-added at the position computed below. */
	list_del(&ie->list);

	/* Insert before the first entry that is not NAME_PENDING and has
	 * an absolute RSSI at least as large as ie's; NAME_PENDING
	 * entries are skipped over (left ahead of ie). */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
472
/* Insert or refresh an inquiry-cache entry for @data.
 *
 * @name_known: caller already knows the remote name for this result.
 * @ssp: out parameter, set to the result's SSP mode (forced true if a
 *       cached entry already reported SSP). May be NULL.
 *
 * Returns true when the entry's name is known (no name resolution
 * needed), false when the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* A previously cached SSP indication sticks. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while waiting for name resolution: re-sort
		 * the entry within the resolve list. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: promote the entry and drop it from the
	 * unknown/resolve list it was on (but never while NAME_PENDING). */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	/* Refresh the cached data and timestamps in all cases. */
	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
528
529static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
530{
Johan Hedberg30883512012-01-04 14:16:21 +0200531 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 struct inquiry_info *info = (struct inquiry_info *) buf;
533 struct inquiry_entry *e;
534 int copied = 0;
535
Johan Hedberg561aafb2012-01-04 13:31:59 +0200536 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200538
539 if (copied >= num)
540 break;
541
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 bacpy(&info->bdaddr, &data->bdaddr);
543 info->pscan_rep_mode = data->pscan_rep_mode;
544 info->pscan_period_mode = data->pscan_period_mode;
545 info->pscan_mode = data->pscan_mode;
546 memcpy(info->dev_class, data->dev_class, 3);
547 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200548
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200550 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 }
552
553 BT_DBG("cache %p, copied %d", cache, copied);
554 return copied;
555}
556
557static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
558{
559 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
560 struct hci_cp_inquiry cp;
561
562 BT_DBG("%s", hdev->name);
563
564 if (test_bit(HCI_INQUIRY, &hdev->flags))
565 return;
566
567 /* Start Inquiry */
568 memcpy(&cp.lap, &ir->lap, 3);
569 cp.length = ir->length;
570 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200571 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572}
573
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry, then copy
 * cached results back to user space. @arg points to a user-space
 * struct hci_inquiry_req followed by room for the results. Returns 0
 * or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Run a new inquiry when the cache is stale/empty or the caller
	 * explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of ~2 seconds */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
640
641/* ---- HCI ioctl helpers ---- */
642
/* HCIDEVUP: bring up the HCI device with index @dev.
 *
 * Opens the driver, runs the HCI init sequence (unless the device is
 * in raw mode), and on success marks the device HCI_UP and notifies
 * the socket and mgmt layers. On init failure everything is torn back
 * down. Returns 0 or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is going away: refuse to bring it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Radio is soft/hard blocked via rfkill */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Run the HCI init sequence under the HCI_INIT flag; raw devices
	 * skip initialization entirely. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* Additional LE init for LE-capable hosts */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't announce power-on while still in mgmt setup */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup: drain workers, purge queues and
		 * close the driver again. */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
734
/* Shut down @hdev: cancel pending work and requests, flush queues and
 * the connection/inquiry state, optionally reset the controller, and
 * close the driver. Safe to call on an already-down device (returns 0
 * early). The teardown order below is deliberate. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	/* Abort any synchronous request still waiting in __hci_request() */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Cancel a pending discoverable timeout and clear the flag */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* Drop cached inquiry results and all connections */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device, unless raw or the reset-on-close quirk is absent */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Announce power-off unless auto-off already did */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Wipe EIR data and device class so a reopen starts clean */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
822
823int hci_dev_close(__u16 dev)
824{
825 struct hci_dev *hdev;
826 int err;
827
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200828 hdev = hci_dev_get(dev);
829 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100831
832 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
833 cancel_delayed_work(&hdev->power_off);
834
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100836
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837 hci_dev_put(hdev);
838 return err;
839}
840
841int hci_dev_reset(__u16 dev)
842{
843 struct hci_dev *hdev;
844 int ret = 0;
845
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200846 hdev = hci_dev_get(dev);
847 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700848 return -ENODEV;
849
850 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851
852 if (!test_bit(HCI_UP, &hdev->flags))
853 goto done;
854
855 /* Drop queues */
856 skb_queue_purge(&hdev->rx_q);
857 skb_queue_purge(&hdev->cmd_q);
858
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300859 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860 inquiry_cache_flush(hdev);
861 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300862 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863
864 if (hdev->flush)
865 hdev->flush(hdev);
866
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900867 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300868 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869
870 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200871 ret = __hci_request(hdev, hci_reset_req, 0,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300872 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700873
874done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875 hci_req_unlock(hdev);
876 hci_dev_put(hdev);
877 return ret;
878}
879
880int hci_dev_reset_stat(__u16 dev)
881{
882 struct hci_dev *hdev;
883 int ret = 0;
884
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200885 hdev = hci_dev_get(dev);
886 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700887 return -ENODEV;
888
889 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
890
891 hci_dev_put(hdev);
892
893 return ret;
894}
895
896int hci_dev_cmd(unsigned int cmd, void __user *arg)
897{
898 struct hci_dev *hdev;
899 struct hci_dev_req dr;
900 int err = 0;
901
902 if (copy_from_user(&dr, arg, sizeof(dr)))
903 return -EFAULT;
904
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200905 hdev = hci_dev_get(dr.dev_id);
906 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907 return -ENODEV;
908
909 switch (cmd) {
910 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200911 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300912 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913 break;
914
915 case HCISETENCRYPT:
916 if (!lmp_encrypt_capable(hdev)) {
917 err = -EOPNOTSUPP;
918 break;
919 }
920
921 if (!test_bit(HCI_AUTH, &hdev->flags)) {
922 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200923 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300924 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700925 if (err)
926 break;
927 }
928
Marcel Holtmann04837f62006-07-03 10:02:33 +0200929 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300930 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700931 break;
932
933 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200934 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300935 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700936 break;
937
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200938 case HCISETLINKPOL:
939 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300940 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200941 break;
942
943 case HCISETLINKMODE:
944 hdev->link_mode = ((__u16) dr.dev_opt) &
945 (HCI_LM_MASTER | HCI_LM_ACCEPT);
946 break;
947
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948 case HCISETPTYPE:
949 hdev->pkt_type = (__u16) dr.dev_opt;
950 break;
951
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200953 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
954 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700955 break;
956
957 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200958 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
959 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960 break;
961
962 default:
963 err = -EINVAL;
964 break;
965 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200966
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967 hci_dev_put(hdev);
968 return err;
969}
970
971int hci_get_dev_list(void __user *arg)
972{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200973 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 struct hci_dev_list_req *dl;
975 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976 int n = 0, size, err;
977 __u16 dev_num;
978
979 if (get_user(dev_num, (__u16 __user *) arg))
980 return -EFAULT;
981
982 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
983 return -EINVAL;
984
985 size = sizeof(*dl) + dev_num * sizeof(*dr);
986
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200987 dl = kzalloc(size, GFP_KERNEL);
988 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989 return -ENOMEM;
990
991 dr = dl->dev_req;
992
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -0200993 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200994 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200995 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +0200996 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +0200997
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200998 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
999 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001000
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001 (dr + n)->dev_id = hdev->id;
1002 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001003
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004 if (++n >= dev_num)
1005 break;
1006 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001007 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008
1009 dl->dev_num = n;
1010 size = sizeof(*dl) + n * sizeof(*dr);
1011
1012 err = copy_to_user(arg, dl, size);
1013 kfree(dl);
1014
1015 return err ? -EFAULT : 0;
1016}
1017
1018int hci_get_dev_info(void __user *arg)
1019{
1020 struct hci_dev *hdev;
1021 struct hci_dev_info di;
1022 int err = 0;
1023
1024 if (copy_from_user(&di, arg, sizeof(di)))
1025 return -EFAULT;
1026
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001027 hdev = hci_dev_get(di.dev_id);
1028 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029 return -ENODEV;
1030
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001031 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001032 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001033
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001034 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1035 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001036
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037 strcpy(di.name, hdev->name);
1038 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001039 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001040 di.flags = hdev->flags;
1041 di.pkt_type = hdev->pkt_type;
1042 di.acl_mtu = hdev->acl_mtu;
1043 di.acl_pkts = hdev->acl_pkts;
1044 di.sco_mtu = hdev->sco_mtu;
1045 di.sco_pkts = hdev->sco_pkts;
1046 di.link_policy = hdev->link_policy;
1047 di.link_mode = hdev->link_mode;
1048
1049 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1050 memcpy(&di.features, &hdev->features, sizeof(di.features));
1051
1052 if (copy_to_user(arg, &di, sizeof(di)))
1053 err = -EFAULT;
1054
1055 hci_dev_put(hdev);
1056
1057 return err;
1058}
1059
1060/* ---- Interface to HCI drivers ---- */
1061
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001062static int hci_rfkill_set_block(void *data, bool blocked)
1063{
1064 struct hci_dev *hdev = data;
1065
1066 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1067
1068 if (!blocked)
1069 return 0;
1070
1071 hci_dev_do_close(hdev);
1072
1073 return 0;
1074}
1075
1076static const struct rfkill_ops hci_rfkill_ops = {
1077 .set_block = hci_rfkill_set_block,
1078};
1079
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001080static void hci_power_on(struct work_struct *work)
1081{
1082 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1083
1084 BT_DBG("%s", hdev->name);
1085
1086 if (hci_dev_open(hdev->id) < 0)
1087 return;
1088
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001089 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Gustavo F. Padovan80b7ab32011-12-17 14:52:27 -02001090 schedule_delayed_work(&hdev->power_off,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001091 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001092
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001093 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001094 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001095}
1096
1097static void hci_power_off(struct work_struct *work)
1098{
Johan Hedberg32435532011-11-07 22:16:04 +02001099 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001100 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001101
1102 BT_DBG("%s", hdev->name);
1103
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001104 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001105}
1106
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001107static void hci_discov_off(struct work_struct *work)
1108{
1109 struct hci_dev *hdev;
1110 u8 scan = SCAN_PAGE;
1111
1112 hdev = container_of(work, struct hci_dev, discov_off.work);
1113
1114 BT_DBG("%s", hdev->name);
1115
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001116 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001117
1118 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1119
1120 hdev->discov_timeout = 0;
1121
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001122 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001123}
1124
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001125int hci_uuids_clear(struct hci_dev *hdev)
1126{
1127 struct list_head *p, *n;
1128
1129 list_for_each_safe(p, n, &hdev->uuids) {
1130 struct bt_uuid *uuid;
1131
1132 uuid = list_entry(p, struct bt_uuid, list);
1133
1134 list_del(p);
1135 kfree(uuid);
1136 }
1137
1138 return 0;
1139}
1140
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001141int hci_link_keys_clear(struct hci_dev *hdev)
1142{
1143 struct list_head *p, *n;
1144
1145 list_for_each_safe(p, n, &hdev->link_keys) {
1146 struct link_key *key;
1147
1148 key = list_entry(p, struct link_key, list);
1149
1150 list_del(p);
1151 kfree(key);
1152 }
1153
1154 return 0;
1155}
1156
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001157int hci_smp_ltks_clear(struct hci_dev *hdev)
1158{
1159 struct smp_ltk *k, *tmp;
1160
1161 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1162 list_del(&k->list);
1163 kfree(k);
1164 }
1165
1166 return 0;
1167}
1168
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001169struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1170{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001171 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001172
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001173 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001174 if (bacmp(bdaddr, &k->bdaddr) == 0)
1175 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001176
1177 return NULL;
1178}
1179
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301180static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001181 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001182{
1183 /* Legacy key */
1184 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301185 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001186
1187 /* Debug keys are insecure so don't store them persistently */
1188 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301189 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001190
1191 /* Changed combination key and there's no previous one */
1192 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301193 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001194
1195 /* Security mode 3 case */
1196 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301197 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001198
1199 /* Neither local nor remote side had no-bonding as requirement */
1200 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301201 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001202
1203 /* Local side had dedicated bonding as requirement */
1204 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301205 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001206
1207 /* Remote side had dedicated bonding as requirement */
1208 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301209 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001210
1211 /* If none of the above criteria match, then don't store the key
1212 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301213 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001214}
1215
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001216struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001217{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001218 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001219
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001220 list_for_each_entry(k, &hdev->long_term_keys, list) {
1221 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001222 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001223 continue;
1224
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001225 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001226 }
1227
1228 return NULL;
1229}
1230EXPORT_SYMBOL(hci_find_ltk);
1231
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001232struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001233 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001234{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001236
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001237 list_for_each_entry(k, &hdev->long_term_keys, list)
1238 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001239 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001240 return k;
1241
1242 return NULL;
1243}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001244EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001245
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001246int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001247 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001248{
1249 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301250 u8 old_key_type;
1251 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001252
1253 old_key = hci_find_link_key(hdev, bdaddr);
1254 if (old_key) {
1255 old_key_type = old_key->type;
1256 key = old_key;
1257 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001258 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001259 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1260 if (!key)
1261 return -ENOMEM;
1262 list_add(&key->list, &hdev->link_keys);
1263 }
1264
1265 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1266
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001267 /* Some buggy controller combinations generate a changed
1268 * combination key for legacy pairing even when there's no
1269 * previous key */
1270 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001271 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001272 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001273 if (conn)
1274 conn->key_type = type;
1275 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001276
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001277 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001278 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001279 key->pin_len = pin_len;
1280
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001281 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001282 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001283 else
1284 key->type = type;
1285
Johan Hedberg4df378a2011-04-28 11:29:03 -07001286 if (!new_key)
1287 return 0;
1288
1289 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1290
Johan Hedberg744cf192011-11-08 20:40:14 +02001291 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001292
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301293 if (conn)
1294 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001295
1296 return 0;
1297}
1298
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001299int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001300 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001301 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001302{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001303 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001304
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001305 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1306 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001307
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001308 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1309 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001310 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001311 else {
1312 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001313 if (!key)
1314 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001315 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001316 }
1317
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001318 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001319 key->bdaddr_type = addr_type;
1320 memcpy(key->val, tk, sizeof(key->val));
1321 key->authenticated = authenticated;
1322 key->ediv = ediv;
1323 key->enc_size = enc_size;
1324 key->type = type;
1325 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001326
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001327 if (!new_key)
1328 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001329
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001330 if (type & HCI_SMP_LTK)
1331 mgmt_new_ltk(hdev, key, 1);
1332
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001333 return 0;
1334}
1335
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001336int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1337{
1338 struct link_key *key;
1339
1340 key = hci_find_link_key(hdev, bdaddr);
1341 if (!key)
1342 return -ENOENT;
1343
1344 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1345
1346 list_del(&key->list);
1347 kfree(key);
1348
1349 return 0;
1350}
1351
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001352int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1353{
1354 struct smp_ltk *k, *tmp;
1355
1356 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1357 if (bacmp(bdaddr, &k->bdaddr))
1358 continue;
1359
1360 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1361
1362 list_del(&k->list);
1363 kfree(k);
1364 }
1365
1366 return 0;
1367}
1368
Ville Tervo6bd32322011-02-16 16:32:41 +02001369/* HCI command timer function */
1370static void hci_cmd_timer(unsigned long arg)
1371{
1372 struct hci_dev *hdev = (void *) arg;
1373
1374 BT_ERR("%s command tx timeout", hdev->name);
1375 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001376 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001377}
1378
Szymon Janc2763eda2011-03-22 13:12:22 +01001379struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001380 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001381{
1382 struct oob_data *data;
1383
1384 list_for_each_entry(data, &hdev->remote_oob_data, list)
1385 if (bacmp(bdaddr, &data->bdaddr) == 0)
1386 return data;
1387
1388 return NULL;
1389}
1390
1391int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1392{
1393 struct oob_data *data;
1394
1395 data = hci_find_remote_oob_data(hdev, bdaddr);
1396 if (!data)
1397 return -ENOENT;
1398
1399 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1400
1401 list_del(&data->list);
1402 kfree(data);
1403
1404 return 0;
1405}
1406
1407int hci_remote_oob_data_clear(struct hci_dev *hdev)
1408{
1409 struct oob_data *data, *n;
1410
1411 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1412 list_del(&data->list);
1413 kfree(data);
1414 }
1415
1416 return 0;
1417}
1418
1419int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001420 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001421{
1422 struct oob_data *data;
1423
1424 data = hci_find_remote_oob_data(hdev, bdaddr);
1425
1426 if (!data) {
1427 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1428 if (!data)
1429 return -ENOMEM;
1430
1431 bacpy(&data->bdaddr, bdaddr);
1432 list_add(&data->list, &hdev->remote_oob_data);
1433 }
1434
1435 memcpy(data->hash, hash, sizeof(data->hash));
1436 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1437
1438 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1439
1440 return 0;
1441}
1442
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001443struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001444{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001445 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001446
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001447 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001448 if (bacmp(bdaddr, &b->bdaddr) == 0)
1449 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001450
1451 return NULL;
1452}
1453
1454int hci_blacklist_clear(struct hci_dev *hdev)
1455{
1456 struct list_head *p, *n;
1457
1458 list_for_each_safe(p, n, &hdev->blacklist) {
1459 struct bdaddr_list *b;
1460
1461 b = list_entry(p, struct bdaddr_list, list);
1462
1463 list_del(p);
1464 kfree(b);
1465 }
1466
1467 return 0;
1468}
1469
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001470int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001471{
1472 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001473
1474 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1475 return -EBADF;
1476
Antti Julku5e762442011-08-25 16:48:02 +03001477 if (hci_blacklist_lookup(hdev, bdaddr))
1478 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001479
1480 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001481 if (!entry)
1482 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001483
1484 bacpy(&entry->bdaddr, bdaddr);
1485
1486 list_add(&entry->list, &hdev->blacklist);
1487
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001488 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001489}
1490
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001491int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001492{
1493 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001494
Szymon Janc1ec918c2011-11-16 09:32:21 +01001495 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001496 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001497
1498 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001499 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001500 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001501
1502 list_del(&entry->list);
1503 kfree(entry);
1504
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001505 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001506}
1507
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001508static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1509{
1510 struct le_scan_params *param = (struct le_scan_params *) opt;
1511 struct hci_cp_le_set_scan_param cp;
1512
1513 memset(&cp, 0, sizeof(cp));
1514 cp.type = param->type;
1515 cp.interval = cpu_to_le16(param->interval);
1516 cp.window = cpu_to_le16(param->window);
1517
1518 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1519}
1520
1521static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1522{
1523 struct hci_cp_le_set_scan_enable cp;
1524
1525 memset(&cp, 0, sizeof(cp));
1526 cp.enable = 1;
1527
1528 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1529}
1530
1531static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001532 u16 window, int timeout)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001533{
1534 long timeo = msecs_to_jiffies(3000);
1535 struct le_scan_params param;
1536 int err;
1537
1538 BT_DBG("%s", hdev->name);
1539
1540 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1541 return -EINPROGRESS;
1542
1543 param.type = type;
1544 param.interval = interval;
1545 param.window = window;
1546
1547 hci_req_lock(hdev);
1548
1549 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001550 timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001551 if (!err)
1552 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1553
1554 hci_req_unlock(hdev);
1555
1556 if (err < 0)
1557 return err;
1558
1559 schedule_delayed_work(&hdev->le_scan_disable,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001560 msecs_to_jiffies(timeout));
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001561
1562 return 0;
1563}
1564
Andre Guedes7dbfac12012-03-15 16:52:07 -03001565int hci_cancel_le_scan(struct hci_dev *hdev)
1566{
1567 BT_DBG("%s", hdev->name);
1568
1569 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1570 return -EALREADY;
1571
1572 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1573 struct hci_cp_le_set_scan_enable cp;
1574
1575 /* Send HCI command to disable LE Scan */
1576 memset(&cp, 0, sizeof(cp));
1577 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1578 }
1579
1580 return 0;
1581}
1582
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001583static void le_scan_disable_work(struct work_struct *work)
1584{
1585 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001586 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001587 struct hci_cp_le_set_scan_enable cp;
1588
1589 BT_DBG("%s", hdev->name);
1590
1591 memset(&cp, 0, sizeof(cp));
1592
1593 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1594}
1595
Andre Guedes28b75a82012-02-03 17:48:00 -03001596static void le_scan_work(struct work_struct *work)
1597{
1598 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1599 struct le_scan_params *param = &hdev->le_scan_params;
1600
1601 BT_DBG("%s", hdev->name);
1602
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001603 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1604 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001605}
1606
1607int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001608 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001609{
1610 struct le_scan_params *param = &hdev->le_scan_params;
1611
1612 BT_DBG("%s", hdev->name);
1613
1614 if (work_busy(&hdev->le_scan))
1615 return -EINPROGRESS;
1616
1617 param->type = type;
1618 param->interval = interval;
1619 param->window = window;
1620 param->timeout = timeout;
1621
1622 queue_work(system_long_wq, &hdev->le_scan);
1623
1624 return 0;
1625}
1626
/* Alloc HCI device.
 *
 * Allocates and initialises a struct hci_dev with default packet
 * types, locks, lists, work items and queues.  Returns NULL on
 * allocation failure.  The caller later registers the device with
 * hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR packet types and link policy defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	/* Default sniff-mode interval bounds (slots) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Command timeout timer; fires when the controller stalls */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1680
/* Free HCI device.
 *
 * Drops the driver-init queue and releases the device reference; the
 * actual kfree happens in the device release callback once the last
 * reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1690
/* Register HCI device.
 *
 * Allocates the lowest free index (AMP controllers start at 1 so that
 * index 0 can double as the AMP controller ID), links the device into
 * the sorted global device list, creates its work queue and sysfs
 * entries, optionally registers an rfkill switch, and schedules the
 * initial power-on.  Returns the assigned id, or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head, *p;
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	write_lock(&hci_dev_list_lock);

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
	head = &hci_dev_list;

	/* Find first available device id; the list is kept sorted by id,
	 * and 'head' tracks the insertion point. */
	list_for_each(p, &hci_dev_list) {
		int nid = list_entry(p, struct hci_dev, list)->id;
		if (nid > id)
			break;
		if (nid == id)
			id++;
		head = p;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	list_add(&hdev->list, head);

	write_unlock(&hci_dev_list_lock);

	/* Per-device ordered workqueue for RX/TX/command processing */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional; registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion made above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1767
/* Unregister HCI device.
 *
 * Reverse of hci_register_dev(): unlink from the global list, close
 * the device, free reassembly buffers, tear down mgmt/sysfs/rfkill
 * state and drop the reference taken at registration time.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark unregistering so concurrent operations can bail out */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if the device was fully set up */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Flush persistent per-device data under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1819
/* Suspend HCI device.
 *
 * Only notifies registered listeners; no device state is changed here.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1827
/* Resume HCI device.
 *
 * Counterpart to hci_suspend_dev(); only notifies listeners.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1835
Marcel Holtmann76bca882009-11-18 00:40:39 +01001836/* Receive frame from HCI drivers */
1837int hci_recv_frame(struct sk_buff *skb)
1838{
1839 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1840 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001841 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001842 kfree_skb(skb);
1843 return -ENXIO;
1844 }
1845
1846 /* Incomming skb */
1847 bt_cb(skb)->incoming = 1;
1848
1849 /* Time stamp */
1850 __net_timestamp(skb);
1851
Marcel Holtmann76bca882009-11-18 00:40:39 +01001852 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001853 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001854
Marcel Holtmann76bca882009-11-18 00:40:39 +01001855 return 0;
1856}
1857EXPORT_SYMBOL(hci_recv_frame);
1858
/* Reassemble a packet of the given type from a driver byte stream.
 *
 * 'index' selects the per-type reassembly slot in hdev->reassembly[].
 * Consumes up to 'count' bytes from 'data' and returns the number of
 * bytes NOT consumed (>= 0), or a negative errno.  When a full frame
 * has been assembled it is handed to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate the maximum-size buffer
		 * for this packet type and expect its header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify it fits the allocated buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1967
Marcel Holtmannef222012007-07-11 06:42:04 +02001968int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1969{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301970 int rem = 0;
1971
Marcel Holtmannef222012007-07-11 06:42:04 +02001972 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1973 return -EILSEQ;
1974
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001975 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001976 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301977 if (rem < 0)
1978 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001979
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301980 data += (count - rem);
1981 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001982 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001983
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301984 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001985}
1986EXPORT_SYMBOL(hci_recv_fragment);
1987
/* Slot in hdev->reassembly[] used for type-prefixed byte streams */
#define STREAM_REASSEMBLY 0

/* Feed a raw H4-style byte stream into the reassembler.
 *
 * Unlike hci_recv_fragment(), the packet type is not known up front:
 * at the start of each frame the first byte of the stream carries the
 * packet type indicator.  Returns bytes left unconsumed or a negative
 * errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: first byte is the type */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2022
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023/* ---- Interface to upper protocols ---- */
2024
/* Register an upper-protocol callback structure.
 *
 * Adds 'cb' to the global callback list under the writer lock.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2036
/* Unregister an upper-protocol callback structure.
 *
 * Removes 'cb' from the global callback list under the writer lock.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2048
2049static int hci_send_frame(struct sk_buff *skb)
2050{
2051 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2052
2053 if (!hdev) {
2054 kfree_skb(skb);
2055 return -ENODEV;
2056 }
2057
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002058 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002060 /* Time stamp */
2061 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002063 /* Send copy to monitor */
2064 hci_send_to_monitor(hdev, skb);
2065
2066 if (atomic_read(&hdev->promisc)) {
2067 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002068 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 }
2070
2071 /* Get rid of skb owner, prior to sending to the driver. */
2072 skb_orphan(skb);
2073
2074 return hdev->send(skb);
2075}
2076
/* Send HCI command.
 *
 * Builds a command packet (header plus 'plen' parameter bytes copied
 * from 'param'), queues it on the command queue and kicks the command
 * work.  Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during device init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
2113/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002114void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115{
2116 struct hci_command_hdr *hdr;
2117
2118 if (!hdev->sent_cmd)
2119 return NULL;
2120
2121 hdr = (void *) hdev->sent_cmd->data;
2122
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002123 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 return NULL;
2125
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002126 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127
2128 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2129}
2130
/* Send ACL data */
/* Prepend an ACL header (handle+flags, payload length) to the skb.
 * The payload length is captured before skb_push() grows the buffer.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2143
/* Queue an ACL packet (possibly a fragment chain) on 'queue'.
 *
 * The head skb gets an ACL header with the caller's flags; any
 * fragments hanging off frag_list get ACL_CONT headers and all pieces
 * are queued atomically under the queue lock so the TX task never
 * sees a partial frame.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are
	 * handled individually below. */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2190
2191void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2192{
2193 struct hci_conn *conn = chan->conn;
2194 struct hci_dev *hdev = conn->hdev;
2195
2196 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2197
2198 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002199
2200 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002202 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203}
2204EXPORT_SYMBOL(hci_send_acl);
2205
/* Send SCO data.
 *
 * Prepends a SCO header (connection handle and payload length),
 * queues the skb on the connection's data queue and kicks the TX
 * work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Capture the payload length before skb_push() changes skb->len */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2228
2229/* ---- HCI TX task (outgoing data) ---- */
2230
2231/* HCI Connection scheduler */
/* Pick the connection of the given link type with the least
 * outstanding packets that has data queued, and compute its fair-share
 * quota from the controller's free buffer count.
 *
 * Returns the chosen connection (or NULL) and writes the per-round
 * packet quota into *quote.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest packets in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffer count for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always allow at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2291
/* Handle a link TX timeout: disconnect every connection of the given
 * type that still has unacknowledged packets outstanding.
 * Reason 0x13 is "remote user terminated connection".
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2312
/* Channel-level scheduler: among all channels of connections of the
 * given link type, pick the one holding the highest-priority queued
 * skb, breaking ties by the connection with the fewest packets in
 * flight.  Writes the fair-share packet quota into *quote.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the selection */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffer count for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share, but always allow at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2391
/* Anti-starvation pass: promote the head skb of every channel that
 * sent nothing in the last scheduling round to just below HCI_PRIO_MAX,
 * and reset the per-round 'sent' counters of channels that did send.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got serviced: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2441
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002442static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2443{
2444 /* Calculate count of blocks used by this packet */
2445 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2446}
2447
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002448static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 if (!test_bit(HCI_RAW, &hdev->flags)) {
2451 /* ACL tx timeout must be longer than maximum
2452 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002453 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002454 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002455 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002457}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	/* Packet-based ACL scheduler: transmit queued frames while the
	 * controller still has free ACL packet slots (acl_cnt).
	 */
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled ACL link (no free slots for too long) */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Remember the priority of the head packet; only packets
		 * of at least this priority are sent this round.
		 */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller slot consumed; track per-channel
			 * and per-connection counts for fairness.
			 */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Re-balance channel priorities if anything was sent */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2496
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	/* Block-based ACL scheduler: controllers in block flow control
	 * mode account for outstanding data in fixed-size blocks
	 * (block_cnt) rather than whole packets.
	 */
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled ACL link (no free blocks for too long) */
	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): when the packet needs more blocks
			 * than are free we bail out, but the skb has already
			 * been dequeued and is neither freed nor re-queued —
			 * looks like a potential skb leak; confirm upstream.
			 */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Charge the blocks against both the device budget
			 * and this channel's quota.
			 */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Re-balance channel priorities if anything was sent */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2542
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002543static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002544{
2545 BT_DBG("%s", hdev->name);
2546
2547 if (!hci_conn_num(hdev, ACL_LINK))
2548 return;
2549
2550 switch (hdev->flow_ctl_mode) {
2551 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2552 hci_sched_acl_pkt(hdev);
2553 break;
2554
2555 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2556 hci_sched_acl_blk(hdev);
2557 break;
2558 }
2559}
2560
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002562static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563{
2564 struct hci_conn *conn;
2565 struct sk_buff *skb;
2566 int quote;
2567
2568 BT_DBG("%s", hdev->name);
2569
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002570 if (!hci_conn_num(hdev, SCO_LINK))
2571 return;
2572
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2574 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2575 BT_DBG("skb %p len %d", skb, skb->len);
2576 hci_send_frame(skb);
2577
2578 conn->sent++;
2579 if (conn->sent == ~0)
2580 conn->sent = 0;
2581 }
2582 }
2583}
2584
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002585static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002586{
2587 struct hci_conn *conn;
2588 struct sk_buff *skb;
2589 int quote;
2590
2591 BT_DBG("%s", hdev->name);
2592
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002593 if (!hci_conn_num(hdev, ESCO_LINK))
2594 return;
2595
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002596 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2597 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002598 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2599 BT_DBG("skb %p len %d", skb, skb->len);
2600 hci_send_frame(skb);
2601
2602 conn->sent++;
2603 if (conn->sent == ~0)
2604 conn->sent = 0;
2605 }
2606 }
2607}
2608
static void hci_sched_le(struct hci_dev *hdev)
{
	/* LE scheduler. Controllers without a dedicated LE buffer pool
	 * (le_pkts == 0) share the ACL buffer budget instead.
	 */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the budget from the LE pool or, failing that, ACL */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Re-balance channel priorities if anything was sent */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2659
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002660static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002662 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663 struct sk_buff *skb;
2664
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002665 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002666 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667
2668 /* Schedule queues and send stuff to HCI driver */
2669
2670 hci_sched_acl(hdev);
2671
2672 hci_sched_sco(hdev);
2673
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002674 hci_sched_esco(hdev);
2675
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002676 hci_sched_le(hdev);
2677
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 /* Send next queued raw (unknown type) packet */
2679 while ((skb = skb_dequeue(&hdev->raw_q)))
2680 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681}
2682
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002683/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	/* Strip the ACL header; hdr still points at the original bytes */
	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field carries both the connection handle
	 * and the packet boundary/broadcast flags.
	 */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Report the connection to mgmt once, on the first data
		 * seen for a connection not yet marked as connected.
		 */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	/* No matching connection: the packet is dropped here */
	kfree_skb(skb);
}
2728
2729/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002730static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731{
2732 struct hci_sco_hdr *hdr = (void *) skb->data;
2733 struct hci_conn *conn;
2734 __u16 handle;
2735
2736 skb_pull(skb, HCI_SCO_HDR_SIZE);
2737
2738 handle = __le16_to_cpu(hdr->handle);
2739
2740 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2741
2742 hdev->stat.sco_rx++;
2743
2744 hci_dev_lock(hdev);
2745 conn = hci_conn_hash_lookup_handle(hdev, handle);
2746 hci_dev_unlock(hdev);
2747
2748 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002750 sco_recv_scodata(conn, skb);
2751 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002753 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002754 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 }
2756
2757 kfree_skb(skb);
2758}
2759
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* Drain the receive queue, dispatching each packet by type */
	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode the stack does not process packets itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
2814
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command before replacing it */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the command in flight can be inspected
		 * later (e.g. by the completion path).
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue the command and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002845
2846int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2847{
2848 /* General inquiry access code (GIAC) */
2849 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2850 struct hci_cp_inquiry cp;
2851
2852 BT_DBG("%s", hdev->name);
2853
2854 if (test_bit(HCI_INQUIRY, &hdev->flags))
2855 return -EINPROGRESS;
2856
Johan Hedberg46632622012-01-02 16:06:08 +02002857 inquiry_cache_flush(hdev);
2858
Andre Guedes2519a1f2011-11-07 11:45:24 -03002859 memset(&cp, 0, sizeof(cp));
2860 memcpy(&cp.lap, lap, sizeof(cp.lap));
2861 cp.length = length;
2862
2863 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2864}
Andre Guedes023d50492011-11-04 14:16:52 -03002865
2866int hci_cancel_inquiry(struct hci_dev *hdev)
2867{
2868 BT_DBG("%s", hdev->name);
2869
2870 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002871 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002872
2873 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2874}
Andre Guedes31f79562012-04-24 21:02:53 -03002875
2876u8 bdaddr_to_le(u8 bdaddr_type)
2877{
2878 switch (bdaddr_type) {
2879 case BDADDR_LE_PUBLIC:
2880 return ADDR_LE_DEV_PUBLIC;
2881
2882 default:
2883 /* Fallback to LE Random address type */
2884 return ADDR_LE_DEV_RANDOM;
2885 }
2886}