blob: 32dcb09cdb5dee9e58903954f85c3030f6867686 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
Sasha Levin3df92b32012-05-27 22:36:56 +020048/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Propagate a device state event (up/down/reg/unreg) to the HCI
 * socket layer so listeners can be informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
Johan Hedberg23bb5762010-12-21 23:01:27 +020060void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070061{
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +030062 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
Johan Hedberg23bb5762010-12-21 23:01:27 +020063
Johan Hedberga5040ef2011-01-10 13:28:59 +020064 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
66 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020067 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
Andrei Emeltchenko1036b892012-03-12 15:59:33 +020069 u16 opcode = __le16_to_cpu(sent->opcode);
Johan Hedberg75fb0e32012-03-01 21:35:55 +020070 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
Andrei Emeltchenko1036b892012-03-12 15:59:33 +020079 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
Johan Hedberg75fb0e32012-03-01 21:35:55 +020080 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
Johan Hedberg23bb5762010-12-21 23:01:27 +020088 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +020089 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300110static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113{
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
125 schedule_timeout(timeout);
126
127 remove_wait_queue(&hdev->req_wait_q, &wait);
128
129 if (signal_pending(current))
130 return -EINTR;
131
132 switch (hdev->req_status) {
133 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700134 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135 break;
136
137 case HCI_REQ_CANCELED:
138 err = -hdev->req_result;
139 break;
140
141 default:
142 err = -ETIMEDOUT;
143 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700144 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145
Johan Hedberga5040ef2011-01-10 13:28:59 +0200146 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
148 BT_DBG("%s end: err %d", hdev->name, err);
149
150 return err;
151}
152
Gustavo Padovan6039aa72012-05-23 04:04:18 -0300153static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156{
157 int ret;
158
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
170static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171{
172 BT_DBG("%s %ld", hdev->name, opt);
173
174 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300175 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177}
178
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200179static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200181 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800182 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200183 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200185 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
186
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187 /* Mandatory initialization */
188
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200190 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200192 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200194
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200196 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200199 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
200
201 /* Read Class of Device */
202 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
203
204 /* Read Local Name */
205 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206
207 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200208 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210 /* Optional initialization */
211
212 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200213 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200214 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 /* Connection accept timeout ~20 secs */
Andrei Emeltchenko82781e62012-05-25 11:38:27 +0300217 param = __constant_cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200218 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200219
220 bacpy(&cp.bdaddr, BDADDR_ANY);
221 cp.delete_all = 1;
222 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223}
224
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200225static void amp_init(struct hci_dev *hdev)
226{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200227 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
228
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200229 /* Read Local Version */
230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300231
232 /* Read Local AMP Info */
233 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200234}
235
236static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
237{
238 struct sk_buff *skb;
239
240 BT_DBG("%s %ld", hdev->name, opt);
241
242 /* Driver initialization */
243
244 /* Special commands */
245 while ((skb = skb_dequeue(&hdev->driver_init))) {
246 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
247 skb->dev = (void *) hdev;
248
249 skb_queue_tail(&hdev->cmd_q, skb);
250 queue_work(hdev->workqueue, &hdev->cmd_work);
251 }
252 skb_queue_purge(&hdev->driver_init);
253
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300254 /* Reset */
255 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
256 hci_reset_req(hdev, 0);
257
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200258 switch (hdev->dev_type) {
259 case HCI_BREDR:
260 bredr_init(hdev);
261 break;
262
263 case HCI_AMP:
264 amp_init(hdev);
265 break;
266
267 default:
268 BT_ERR("Unknown device type %d", hdev->dev_type);
269 break;
270 }
271
272}
273
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300274static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
275{
276 BT_DBG("%s", hdev->name);
277
278 /* Read LE buffer size */
279 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
280}
281
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
283{
284 __u8 scan = opt;
285
286 BT_DBG("%s %x", hdev->name, scan);
287
288 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200289 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290}
291
292static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
293{
294 __u8 auth = opt;
295
296 BT_DBG("%s %x", hdev->name, auth);
297
298 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200299 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300}
301
302static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
303{
304 __u8 encrypt = opt;
305
306 BT_DBG("%s %x", hdev->name, encrypt);
307
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200308 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200309 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310}
311
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200312static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
313{
314 __le16 policy = cpu_to_le16(opt);
315
Marcel Holtmanna418b892008-11-30 12:17:28 +0100316 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200317
318 /* Default link policy */
319 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
320}
321
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900322/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323 * Device is held on return. */
324struct hci_dev *hci_dev_get(int index)
325{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200326 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327
328 BT_DBG("%d", index);
329
330 if (index < 0)
331 return NULL;
332
333 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200334 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 if (d->id == index) {
336 hdev = hci_dev_hold(d);
337 break;
338 }
339 }
340 read_unlock(&hci_dev_list_lock);
341 return hdev;
342}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343
344/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200345
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200346bool hci_discovery_active(struct hci_dev *hdev)
347{
348 struct discovery_state *discov = &hdev->discovery;
349
Andre Guedes6fbe1952012-02-03 17:47:58 -0300350 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300351 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300352 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200353 return true;
354
Andre Guedes6fbe1952012-02-03 17:47:58 -0300355 default:
356 return false;
357 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200358}
359
Johan Hedbergff9ef572012-01-04 14:23:45 +0200360void hci_discovery_set_state(struct hci_dev *hdev, int state)
361{
362 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
363
364 if (hdev->discovery.state == state)
365 return;
366
367 switch (state) {
368 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300369 if (hdev->discovery.state != DISCOVERY_STARTING)
370 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200371 break;
372 case DISCOVERY_STARTING:
373 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300374 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200375 mgmt_discovering(hdev, 1);
376 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200377 case DISCOVERY_RESOLVING:
378 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200379 case DISCOVERY_STOPPING:
380 break;
381 }
382
383 hdev->discovery.state = state;
384}
385
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386static void inquiry_cache_flush(struct hci_dev *hdev)
387{
Johan Hedberg30883512012-01-04 14:16:21 +0200388 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200389 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390
Johan Hedberg561aafb2012-01-04 13:31:59 +0200391 list_for_each_entry_safe(p, n, &cache->all, all) {
392 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200393 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200395
396 INIT_LIST_HEAD(&cache->unknown);
397 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398}
399
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300400struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
401 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402{
Johan Hedberg30883512012-01-04 14:16:21 +0200403 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 struct inquiry_entry *e;
405
406 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
407
Johan Hedberg561aafb2012-01-04 13:31:59 +0200408 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200410 return e;
411 }
412
413 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414}
415
Johan Hedberg561aafb2012-01-04 13:31:59 +0200416struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300417 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200418{
Johan Hedberg30883512012-01-04 14:16:21 +0200419 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200420 struct inquiry_entry *e;
421
422 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
423
424 list_for_each_entry(e, &cache->unknown, list) {
425 if (!bacmp(&e->data.bdaddr, bdaddr))
426 return e;
427 }
428
429 return NULL;
430}
431
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200432struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300433 bdaddr_t *bdaddr,
434 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200435{
436 struct discovery_state *cache = &hdev->discovery;
437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
440
441 list_for_each_entry(e, &cache->resolve, list) {
442 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
443 return e;
444 if (!bacmp(&e->data.bdaddr, bdaddr))
445 return e;
446 }
447
448 return NULL;
449}
450
Johan Hedberga3d4e202012-01-09 00:53:02 +0200451void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300452 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200453{
454 struct discovery_state *cache = &hdev->discovery;
455 struct list_head *pos = &cache->resolve;
456 struct inquiry_entry *p;
457
458 list_del(&ie->list);
459
460 list_for_each_entry(p, &cache->resolve, list) {
461 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300462 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200463 break;
464 pos = &p->list;
465 }
466
467 list_add(&ie->list, pos);
468}
469
Johan Hedberg31754052012-01-04 13:39:52 +0200470bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300471 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700472{
Johan Hedberg30883512012-01-04 14:16:21 +0200473 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200474 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475
476 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
477
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200478 if (ssp)
479 *ssp = data->ssp_mode;
480
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200481 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200482 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200483 if (ie->data.ssp_mode && ssp)
484 *ssp = true;
485
Johan Hedberga3d4e202012-01-09 00:53:02 +0200486 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300487 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +0200488 ie->data.rssi = data->rssi;
489 hci_inquiry_cache_update_resolve(hdev, ie);
490 }
491
Johan Hedberg561aafb2012-01-04 13:31:59 +0200492 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200493 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200494
Johan Hedberg561aafb2012-01-04 13:31:59 +0200495 /* Entry not in the cache. Add new one. */
496 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
497 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200498 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200499
500 list_add(&ie->all, &cache->all);
501
502 if (name_known) {
503 ie->name_state = NAME_KNOWN;
504 } else {
505 ie->name_state = NAME_NOT_KNOWN;
506 list_add(&ie->list, &cache->unknown);
507 }
508
509update:
510 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300511 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +0200512 ie->name_state = NAME_KNOWN;
513 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700514 }
515
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200516 memcpy(&ie->data, data, sizeof(*data));
517 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200519
520 if (ie->name_state == NAME_NOT_KNOWN)
521 return false;
522
523 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700524}
525
526static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
527{
Johan Hedberg30883512012-01-04 14:16:21 +0200528 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700529 struct inquiry_info *info = (struct inquiry_info *) buf;
530 struct inquiry_entry *e;
531 int copied = 0;
532
Johan Hedberg561aafb2012-01-04 13:31:59 +0200533 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200535
536 if (copied >= num)
537 break;
538
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539 bacpy(&info->bdaddr, &data->bdaddr);
540 info->pscan_rep_mode = data->pscan_rep_mode;
541 info->pscan_period_mode = data->pscan_period_mode;
542 info->pscan_mode = data->pscan_mode;
543 memcpy(info->dev_class, data->dev_class, 3);
544 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200545
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200547 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 }
549
550 BT_DBG("cache %p, copied %d", cache, copied);
551 return copied;
552}
553
554static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
555{
556 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
557 struct hci_cp_inquiry cp;
558
559 BT_DBG("%s", hdev->name);
560
561 if (test_bit(HCI_INQUIRY, &hdev->flags))
562 return;
563
564 /* Start Inquiry */
565 memcpy(&cp.lap, &ir->lap, 3);
566 cp.length = ir->length;
567 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200568 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700569}
570
571int hci_inquiry(void __user *arg)
572{
573 __u8 __user *ptr = arg;
574 struct hci_inquiry_req ir;
575 struct hci_dev *hdev;
576 int err = 0, do_inquiry = 0, max_rsp;
577 long timeo;
578 __u8 *buf;
579
580 if (copy_from_user(&ir, ptr, sizeof(ir)))
581 return -EFAULT;
582
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200583 hdev = hci_dev_get(ir.dev_id);
584 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 return -ENODEV;
586
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300587 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900588 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300589 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590 inquiry_cache_flush(hdev);
591 do_inquiry = 1;
592 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300593 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594
Marcel Holtmann04837f62006-07-03 10:02:33 +0200595 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200596
597 if (do_inquiry) {
598 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
599 if (err < 0)
600 goto done;
601 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700602
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300603 /* for unlimited number of responses we will use buffer with
604 * 255 entries
605 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
607
608 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
609 * copy it to the user space.
610 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100611 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200612 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613 err = -ENOMEM;
614 goto done;
615 }
616
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300617 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700618 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300619 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620
621 BT_DBG("num_rsp %d", ir.num_rsp);
622
623 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
624 ptr += sizeof(ir);
625 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300626 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700627 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900628 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700629 err = -EFAULT;
630
631 kfree(buf);
632
633done:
634 hci_dev_put(hdev);
635 return err;
636}
637
638/* ---- HCI ioctl helpers ---- */
639
640int hci_dev_open(__u16 dev)
641{
642 struct hci_dev *hdev;
643 int ret = 0;
644
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200645 hdev = hci_dev_get(dev);
646 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700647 return -ENODEV;
648
649 BT_DBG("%s %p", hdev->name, hdev);
650
651 hci_req_lock(hdev);
652
Johan Hovold94324962012-03-15 14:48:41 +0100653 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
654 ret = -ENODEV;
655 goto done;
656 }
657
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200658 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
659 ret = -ERFKILL;
660 goto done;
661 }
662
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663 if (test_bit(HCI_UP, &hdev->flags)) {
664 ret = -EALREADY;
665 goto done;
666 }
667
668 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
669 set_bit(HCI_RAW, &hdev->flags);
670
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200671 /* Treat all non BR/EDR controllers as raw devices if
672 enable_hs is not set */
673 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100674 set_bit(HCI_RAW, &hdev->flags);
675
Linus Torvalds1da177e2005-04-16 15:20:36 -0700676 if (hdev->open(hdev)) {
677 ret = -EIO;
678 goto done;
679 }
680
681 if (!test_bit(HCI_RAW, &hdev->flags)) {
682 atomic_set(&hdev->cmd_cnt, 1);
683 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200684 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300686 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687
Andre Guedeseead27d2011-06-30 19:20:55 -0300688 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300689 ret = __hci_request(hdev, hci_le_init_req, 0,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300690 HCI_INIT_TIMEOUT);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300691
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 clear_bit(HCI_INIT, &hdev->flags);
693 }
694
695 if (!ret) {
696 hci_dev_hold(hdev);
697 set_bit(HCI_UP, &hdev->flags);
698 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200699 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300700 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200701 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300702 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200703 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900704 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200706 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200707 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400708 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709
710 skb_queue_purge(&hdev->cmd_q);
711 skb_queue_purge(&hdev->rx_q);
712
713 if (hdev->flush)
714 hdev->flush(hdev);
715
716 if (hdev->sent_cmd) {
717 kfree_skb(hdev->sent_cmd);
718 hdev->sent_cmd = NULL;
719 }
720
721 hdev->close(hdev);
722 hdev->flags = 0;
723 }
724
725done:
726 hci_req_unlock(hdev);
727 hci_dev_put(hdev);
728 return ret;
729}
730
731static int hci_dev_do_close(struct hci_dev *hdev)
732{
733 BT_DBG("%s %p", hdev->name, hdev);
734
Andre Guedes28b75a82012-02-03 17:48:00 -0300735 cancel_work_sync(&hdev->le_scan);
736
Linus Torvalds1da177e2005-04-16 15:20:36 -0700737 hci_req_cancel(hdev, ENODEV);
738 hci_req_lock(hdev);
739
740 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300741 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742 hci_req_unlock(hdev);
743 return 0;
744 }
745
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200746 /* Flush RX and TX works */
747 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400748 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200750 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200751 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200752 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +0200753 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200754 }
755
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200756 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200757 cancel_delayed_work(&hdev->service_cache);
758
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300759 cancel_delayed_work_sync(&hdev->le_scan_disable);
760
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300761 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762 inquiry_cache_flush(hdev);
763 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300764 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765
766 hci_notify(hdev, HCI_DEV_DOWN);
767
768 if (hdev->flush)
769 hdev->flush(hdev);
770
771 /* Reset device */
772 skb_queue_purge(&hdev->cmd_q);
773 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200774 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +0200775 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700776 set_bit(HCI_INIT, &hdev->flags);
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300777 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778 clear_bit(HCI_INIT, &hdev->flags);
779 }
780
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200781 /* flush cmd work */
782 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783
784 /* Drop queues */
785 skb_queue_purge(&hdev->rx_q);
786 skb_queue_purge(&hdev->cmd_q);
787 skb_queue_purge(&hdev->raw_q);
788
789 /* Drop last sent command */
790 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300791 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792 kfree_skb(hdev->sent_cmd);
793 hdev->sent_cmd = NULL;
794 }
795
796 /* After this point our queues are empty
797 * and no tasks are scheduled. */
798 hdev->close(hdev);
799
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100800 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
801 hci_dev_lock(hdev);
802 mgmt_powered(hdev, 0);
803 hci_dev_unlock(hdev);
804 }
Johan Hedberg5add6af2010-12-16 10:00:37 +0200805
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806 /* Clear flags */
807 hdev->flags = 0;
808
Johan Hedberge59fda82012-02-22 18:11:53 +0200809 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +0200810 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +0200811
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812 hci_req_unlock(hdev);
813
814 hci_dev_put(hdev);
815 return 0;
816}
817
818int hci_dev_close(__u16 dev)
819{
820 struct hci_dev *hdev;
821 int err;
822
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200823 hdev = hci_dev_get(dev);
824 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700825 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100826
827 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
828 cancel_delayed_work(&hdev->power_off);
829
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100831
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832 hci_dev_put(hdev);
833 return err;
834}
835
/* Reset a running HCI device: drop pending traffic, flush caches and
 * connections, clear flow-control counters and (unless in raw mode)
 * issue an HCI Reset to the controller.
 *
 * Returns 0 on success (including when the device is simply not up)
 * or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up; 0 is returned */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush discovery cache and tear down all connections while
	 * holding the device lock */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset command flow control: one command slot available,
	 * no outstanding ACL/SCO/LE packets */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* In raw mode userspace owns the controller state, so no
	 * HCI Reset is sent */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
873
874int hci_dev_reset_stat(__u16 dev)
875{
876 struct hci_dev *hdev;
877 int ret = 0;
878
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200879 hdev = hci_dev_get(dev);
880 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881 return -ENODEV;
882
883 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
884
885 hci_dev_put(hdev);
886
887 return ret;
888}
889
/* Handler for the HCISET* ioctls: apply a device setting described by
 * a struct hci_dev_req copied from userspace.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, ...)
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Settings that need controller interaction go through hci_request();
 * the others just update fields on the hci_dev. Returns 0 or a
 * negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are user-controllable */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: MTU in the high half,
		 * packet count in the low half */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
964
/* Handler for HCIGETDEVLIST: copy the list of registered HCI devices
 * (id + flags per device) to userspace.
 *
 * @arg points to a struct hci_dev_list_req whose dev_num field tells
 * how many entries the caller's buffer can hold. On return dev_num is
 * rewritten with the number of entries actually filled in.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy interface in use: cancel auto power-off and
		 * treat the device as managed by this API */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries that were actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1011
/* Handler for HCIGETDEVINFO: fill a struct hci_dev_info for the
 * device whose id userspace supplied in di.dev_id.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy interface in use: cancel auto power-off and treat the
	 * device as managed by this API */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1053
1054/* ---- Interface to HCI drivers ---- */
1055
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001056static int hci_rfkill_set_block(void *data, bool blocked)
1057{
1058 struct hci_dev *hdev = data;
1059
1060 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1061
1062 if (!blocked)
1063 return 0;
1064
1065 hci_dev_do_close(hdev);
1066
1067 return 0;
1068}
1069
/* rfkill integration: only the block operation is implemented */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1073
/* Deferred power-on handler (hdev->power_on work).
 *
 * Opens the device; if it is in the auto-off state a delayed
 * power-off is scheduled, and a device still in setup is announced
 * to the management interface once setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-off devices get switched back off unless something
	 * claims them before the timeout fires */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	/* First successful power-on ends the setup phase */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1089
/* Deferred power-off handler (hdev->power_off delayed work):
 * simply closes the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1099
/* Handler for the discoverable-mode timeout (hdev->discov_off delayed
 * work): drop back to page-scan only (SCAN_PAGE, i.e. connectable but
 * not discoverable) and clear the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1117
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001118int hci_uuids_clear(struct hci_dev *hdev)
1119{
1120 struct list_head *p, *n;
1121
1122 list_for_each_safe(p, n, &hdev->uuids) {
1123 struct bt_uuid *uuid;
1124
1125 uuid = list_entry(p, struct bt_uuid, list);
1126
1127 list_del(p);
1128 kfree(uuid);
1129 }
1130
1131 return 0;
1132}
1133
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001134int hci_link_keys_clear(struct hci_dev *hdev)
1135{
1136 struct list_head *p, *n;
1137
1138 list_for_each_safe(p, n, &hdev->link_keys) {
1139 struct link_key *key;
1140
1141 key = list_entry(p, struct link_key, list);
1142
1143 list_del(p);
1144 kfree(key);
1145 }
1146
1147 return 0;
1148}
1149
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001150int hci_smp_ltks_clear(struct hci_dev *hdev)
1151{
1152 struct smp_ltk *k, *tmp;
1153
1154 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1155 list_del(&k->list);
1156 kfree(k);
1157 }
1158
1159 return 0;
1160}
1161
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001162struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1163{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001164 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001165
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001166 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001167 if (bacmp(bdaddr, &k->bdaddr) == 0)
1168 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001169
1170 return NULL;
1171}
1172
/* Decide whether a new link key should be treated as persistent.
 * The verdict is reported to mgmt and controls whether the key is
 * flushed when the connection ends (see hci_add_link_key()).
 * The checks are ordered; the first one that matches wins.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1208
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001209struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001210{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001211 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001212
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001213 list_for_each_entry(k, &hdev->long_term_keys, list) {
1214 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001215 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001216 continue;
1217
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001218 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001219 }
1220
1221 return NULL;
1222}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001223
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001224struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001225 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001226{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001227 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001228
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001229 list_for_each_entry(k, &hdev->long_term_keys, list)
1230 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001231 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001232 return k;
1233
1234 return NULL;
1235}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001236
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn:    connection the key was created on (may be NULL)
 * @new_key: non-zero when the controller reported this as a new key,
 *           in which case mgmt is notified and the flush policy set
 * @val:     the HCI_LINK_KEY_SIZE key bytes
 * @type:    link key type as reported by the controller
 * @pin_len: PIN length used during pairing
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one;
	 * 0xff marks "no previous key type known" otherwise */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection ends */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1289
/* Store (or update) an LE key for @bdaddr/@addr_type.
 *
 * Only short term (HCI_SMP_STK*) and long term (HCI_SMP_LTK*) key
 * types are accepted; anything else is silently ignored (returns 0).
 * When @new_key is set and the key is an LTK, mgmt is notified.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are session-only, so only LTKs are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1326
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001327int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1328{
1329 struct link_key *key;
1330
1331 key = hci_find_link_key(hdev, bdaddr);
1332 if (!key)
1333 return -ENOENT;
1334
1335 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1336
1337 list_del(&key->list);
1338 kfree(key);
1339
1340 return 0;
1341}
1342
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001343int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1344{
1345 struct smp_ltk *k, *tmp;
1346
1347 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1348 if (bacmp(bdaddr, &k->bdaddr))
1349 continue;
1350
1351 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1352
1353 list_del(&k->list);
1354 kfree(k);
1355 }
1356
1357 return 0;
1358}
1359
/* HCI command timer function: fires when the controller failed to
 * answer an HCI command in time. Logs the stuck opcode (if the sent
 * command is still around) and force-opens the command window so the
 * cmd_work can make progress again.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Pretend the credit came back and kick the command queue */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1377
Szymon Janc2763eda2011-03-22 13:12:22 +01001378struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001379 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001380{
1381 struct oob_data *data;
1382
1383 list_for_each_entry(data, &hdev->remote_oob_data, list)
1384 if (bacmp(bdaddr, &data->bdaddr) == 0)
1385 return data;
1386
1387 return NULL;
1388}
1389
1390int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1391{
1392 struct oob_data *data;
1393
1394 data = hci_find_remote_oob_data(hdev, bdaddr);
1395 if (!data)
1396 return -ENOENT;
1397
1398 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1399
1400 list_del(&data->list);
1401 kfree(data);
1402
1403 return 0;
1404}
1405
1406int hci_remote_oob_data_clear(struct hci_dev *hdev)
1407{
1408 struct oob_data *data, *n;
1409
1410 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1411 list_del(&data->list);
1412 kfree(data);
1413 }
1414
1415 return 0;
1416}
1417
/* Store (or refresh) out-of-band pairing data (hash + randomizer)
 * for @bdaddr. An existing entry for the address is overwritten.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* kmalloc is fine here: every field is assigned below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1441
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001442struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001443{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001444 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001445
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001446 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001447 if (bacmp(bdaddr, &b->bdaddr) == 0)
1448 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001449
1450 return NULL;
1451}
1452
1453int hci_blacklist_clear(struct hci_dev *hdev)
1454{
1455 struct list_head *p, *n;
1456
1457 list_for_each_safe(p, n, &hdev->blacklist) {
1458 struct bdaddr_list *b;
1459
1460 b = list_entry(p, struct bdaddr_list, list);
1461
1462 list_del(p);
1463 kfree(b);
1464 }
1465
1466 return 0;
1467}
1468
/* Add @bdaddr to the device blacklist and notify mgmt.
 *
 * @type: address type, forwarded to mgmt_device_blocked() (it is not
 *        stored in the blacklist entry itself)
 *
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already blacklisted,
 * -ENOMEM on allocation failure, otherwise the mgmt result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1489
/* Remove @bdaddr from the device blacklist and notify mgmt.
 *
 * BDADDR_ANY is special-cased to clear the whole list. Returns
 * -ENOENT if the address was not blacklisted, otherwise the mgmt
 * result (or the clear result for BDADDR_ANY).
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1506
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001507static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1508{
1509 struct le_scan_params *param = (struct le_scan_params *) opt;
1510 struct hci_cp_le_set_scan_param cp;
1511
1512 memset(&cp, 0, sizeof(cp));
1513 cp.type = param->type;
1514 cp.interval = cpu_to_le16(param->interval);
1515 cp.window = cpu_to_le16(param->window);
1516
1517 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1518}
1519
1520static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1521{
1522 struct hci_cp_le_set_scan_enable cp;
1523
1524 memset(&cp, 0, sizeof(cp));
1525 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001526 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001527
1528 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1529}
1530
/* Synchronously start an LE scan: set the scan parameters, enable
 * scanning, and schedule the automatic disable after @timeout ms.
 *
 * Each of the two HCI requests gets a fixed 3 s completion window.
 * Returns -EINPROGRESS if a scan is already running, a negative errno
 * on request failure, 0 on success.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	/* param lives on the stack; __hci_request completes before
	 * this function returns, so that is safe */
	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scanning stops automatically once the caller's timeout runs out */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1564
/* Abort a running LE scan before its timeout expires.
 *
 * Returns -EALREADY when no LE scan is active. If the auto-disable
 * work was still pending it is cancelled and a "scan off" command is
 * sent directly instead (cp is zeroed, so cp.enable == 0).
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1582
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001583static void le_scan_disable_work(struct work_struct *work)
1584{
1585 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001586 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001587 struct hci_cp_le_set_scan_enable cp;
1588
1589 BT_DBG("%s", hdev->name);
1590
1591 memset(&cp, 0, sizeof(cp));
1592
1593 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1594}
1595
Andre Guedes28b75a82012-02-03 17:48:00 -03001596static void le_scan_work(struct work_struct *work)
1597{
1598 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1599 struct le_scan_params *param = &hdev->le_scan_params;
1600
1601 BT_DBG("%s", hdev->name);
1602
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001603 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1604 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001605}
1606
/* Asynchronously start an LE scan: record the parameters on the
 * hci_dev and queue the le_scan work (on system_long_wq, since the
 * scan runs for a while) to do the actual HCI traffic.
 *
 * Returns -EINPROGRESS if the scan work is already queued or running,
 * 0 otherwise.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1626
David Herrmann9be0dab2012-04-22 14:39:57 +02001627/* Alloc HCI device */
1628struct hci_dev *hci_alloc_dev(void)
1629{
1630 struct hci_dev *hdev;
1631
1632 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1633 if (!hdev)
1634 return NULL;
1635
David Herrmannb1b813d2012-04-22 14:39:58 +02001636 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1637 hdev->esco_type = (ESCO_HV1);
1638 hdev->link_mode = (HCI_LM_ACCEPT);
1639 hdev->io_capability = 0x03; /* No Input No Output */
1640
David Herrmannb1b813d2012-04-22 14:39:58 +02001641 hdev->sniff_max_interval = 800;
1642 hdev->sniff_min_interval = 80;
1643
1644 mutex_init(&hdev->lock);
1645 mutex_init(&hdev->req_lock);
1646
1647 INIT_LIST_HEAD(&hdev->mgmt_pending);
1648 INIT_LIST_HEAD(&hdev->blacklist);
1649 INIT_LIST_HEAD(&hdev->uuids);
1650 INIT_LIST_HEAD(&hdev->link_keys);
1651 INIT_LIST_HEAD(&hdev->long_term_keys);
1652 INIT_LIST_HEAD(&hdev->remote_oob_data);
David Herrmannb1b813d2012-04-22 14:39:58 +02001653
1654 INIT_WORK(&hdev->rx_work, hci_rx_work);
1655 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1656 INIT_WORK(&hdev->tx_work, hci_tx_work);
1657 INIT_WORK(&hdev->power_on, hci_power_on);
1658 INIT_WORK(&hdev->le_scan, le_scan_work);
1659
David Herrmannb1b813d2012-04-22 14:39:58 +02001660 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1661 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1662 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1663
David Herrmann9be0dab2012-04-22 14:39:57 +02001664 skb_queue_head_init(&hdev->driver_init);
David Herrmannb1b813d2012-04-22 14:39:58 +02001665 skb_queue_head_init(&hdev->rx_q);
1666 skb_queue_head_init(&hdev->cmd_q);
1667 skb_queue_head_init(&hdev->raw_q);
1668
1669 init_waitqueue_head(&hdev->req_wait_q);
1670
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001671 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02001672
David Herrmannb1b813d2012-04-22 14:39:58 +02001673 hci_init_sysfs(hdev);
1674 discovery_init(hdev);
1675 hci_conn_hash_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02001676
1677 return hdev;
1678}
1679EXPORT_SYMBOL(hci_alloc_dev);
1680
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any queued driver init packets that were never sent */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1690
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691/* Register HCI device */
1692int hci_register_dev(struct hci_dev *hdev)
1693{
David Herrmannb1b813d2012-04-22 14:39:58 +02001694 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
David Herrmann010666a2012-01-07 15:47:07 +01001696 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 return -EINVAL;
1698
Mat Martineau08add512011-11-02 16:18:36 -07001699 /* Do not allow HCI_AMP devices to register at index 0,
1700 * so the index can be used as the AMP controller ID.
1701 */
Sasha Levin3df92b32012-05-27 22:36:56 +02001702 switch (hdev->dev_type) {
1703 case HCI_BREDR:
1704 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1705 break;
1706 case HCI_AMP:
1707 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1708 break;
1709 default:
1710 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001712
Sasha Levin3df92b32012-05-27 22:36:56 +02001713 if (id < 0)
1714 return id;
1715
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 sprintf(hdev->name, "hci%d", id);
1717 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03001718
1719 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1720
Sasha Levin3df92b32012-05-27 22:36:56 +02001721 write_lock(&hci_dev_list_lock);
1722 list_add(&hdev->list, &hci_dev_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001723 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001725 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001726 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001727 if (!hdev->workqueue) {
1728 error = -ENOMEM;
1729 goto err;
1730 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001731
David Herrmann33ca9542011-10-08 14:58:49 +02001732 error = hci_add_sysfs(hdev);
1733 if (error < 0)
1734 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001736 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001737 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1738 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001739 if (hdev->rfkill) {
1740 if (rfkill_register(hdev->rfkill) < 0) {
1741 rfkill_destroy(hdev->rfkill);
1742 hdev->rfkill = NULL;
1743 }
1744 }
1745
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001746 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1747 set_bit(HCI_SETUP, &hdev->dev_flags);
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001748 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001749
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01001751 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
1753 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001754
David Herrmann33ca9542011-10-08 14:58:49 +02001755err_wqueue:
1756 destroy_workqueue(hdev->workqueue);
1757err:
Sasha Levin3df92b32012-05-27 22:36:56 +02001758 ida_simple_remove(&hci_index_ida, hdev->id);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001759 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001760 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001761 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001762
David Herrmann33ca9542011-10-08 14:58:49 +02001763 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764}
1765EXPORT_SYMBOL(hci_register_dev);
1766
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark early so concurrent paths can see the device is going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Release all stored keys and remote data */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1822
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1830
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1838
Marcel Holtmann76bca882009-11-18 00:40:39 +01001839/* Receive frame from HCI drivers */
1840int hci_recv_frame(struct sk_buff *skb)
1841{
1842 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1843 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001844 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001845 kfree_skb(skb);
1846 return -ENXIO;
1847 }
1848
1849 /* Incomming skb */
1850 bt_cb(skb)->incoming = 1;
1851
1852 /* Time stamp */
1853 __net_timestamp(skb);
1854
Marcel Holtmann76bca882009-11-18 00:40:39 +01001855 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001856 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001857
Marcel Holtmann76bca882009-11-18 00:40:39 +01001858 return 0;
1859}
1860EXPORT_SYMBOL(hci_recv_frame);
1861
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301862static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001863 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301864{
1865 int len = 0;
1866 int hlen = 0;
1867 int remain = count;
1868 struct sk_buff *skb;
1869 struct bt_skb_cb *scb;
1870
1871 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001872 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301873 return -EILSEQ;
1874
1875 skb = hdev->reassembly[index];
1876
1877 if (!skb) {
1878 switch (type) {
1879 case HCI_ACLDATA_PKT:
1880 len = HCI_MAX_FRAME_SIZE;
1881 hlen = HCI_ACL_HDR_SIZE;
1882 break;
1883 case HCI_EVENT_PKT:
1884 len = HCI_MAX_EVENT_SIZE;
1885 hlen = HCI_EVENT_HDR_SIZE;
1886 break;
1887 case HCI_SCODATA_PKT:
1888 len = HCI_MAX_SCO_SIZE;
1889 hlen = HCI_SCO_HDR_SIZE;
1890 break;
1891 }
1892
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001893 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301894 if (!skb)
1895 return -ENOMEM;
1896
1897 scb = (void *) skb->cb;
1898 scb->expect = hlen;
1899 scb->pkt_type = type;
1900
1901 skb->dev = (void *) hdev;
1902 hdev->reassembly[index] = skb;
1903 }
1904
1905 while (count) {
1906 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03001907 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301908
1909 memcpy(skb_put(skb, len), data, len);
1910
1911 count -= len;
1912 data += len;
1913 scb->expect -= len;
1914 remain = count;
1915
1916 switch (type) {
1917 case HCI_EVENT_PKT:
1918 if (skb->len == HCI_EVENT_HDR_SIZE) {
1919 struct hci_event_hdr *h = hci_event_hdr(skb);
1920 scb->expect = h->plen;
1921
1922 if (skb_tailroom(skb) < scb->expect) {
1923 kfree_skb(skb);
1924 hdev->reassembly[index] = NULL;
1925 return -ENOMEM;
1926 }
1927 }
1928 break;
1929
1930 case HCI_ACLDATA_PKT:
1931 if (skb->len == HCI_ACL_HDR_SIZE) {
1932 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1933 scb->expect = __le16_to_cpu(h->dlen);
1934
1935 if (skb_tailroom(skb) < scb->expect) {
1936 kfree_skb(skb);
1937 hdev->reassembly[index] = NULL;
1938 return -ENOMEM;
1939 }
1940 }
1941 break;
1942
1943 case HCI_SCODATA_PKT:
1944 if (skb->len == HCI_SCO_HDR_SIZE) {
1945 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1946 scb->expect = h->dlen;
1947
1948 if (skb_tailroom(skb) < scb->expect) {
1949 kfree_skb(skb);
1950 hdev->reassembly[index] = NULL;
1951 return -ENOMEM;
1952 }
1953 }
1954 break;
1955 }
1956
1957 if (scb->expect == 0) {
1958 /* Complete frame */
1959
1960 bt_cb(skb)->pkt_type = type;
1961 hci_recv_frame(skb);
1962
1963 hdev->reassembly[index] = NULL;
1964 return remain;
1965 }
1966 }
1967
1968 return remain;
1969}
1970
Marcel Holtmannef222012007-07-11 06:42:04 +02001971int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1972{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301973 int rem = 0;
1974
Marcel Holtmannef222012007-07-11 06:42:04 +02001975 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1976 return -EILSEQ;
1977
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001978 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001979 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301980 if (rem < 0)
1981 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001982
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301983 data += (count - rem);
1984 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001985 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001986
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301987 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001988}
1989EXPORT_SYMBOL(hci_recv_fragment);
1990
Suraj Sumangala99811512010-07-14 13:02:19 +05301991#define STREAM_REASSEMBLY 0
1992
1993int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1994{
1995 int type;
1996 int rem = 0;
1997
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001998 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301999 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2000
2001 if (!skb) {
2002 struct { char type; } *pkt;
2003
2004 /* Start of the frame */
2005 pkt = data;
2006 type = pkt->type;
2007
2008 data++;
2009 count--;
2010 } else
2011 type = bt_cb(skb)->pkt_type;
2012
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002013 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002014 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302015 if (rem < 0)
2016 return rem;
2017
2018 data += (count - rem);
2019 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002020 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302021
2022 return rem;
2023}
2024EXPORT_SYMBOL(hci_recv_stream_fragment);
2025
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026/* ---- Interface to upper protocols ---- */
2027
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028int hci_register_cb(struct hci_cb *cb)
2029{
2030 BT_DBG("%p name %s", cb, cb->name);
2031
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002032 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002034 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
2036 return 0;
2037}
2038EXPORT_SYMBOL(hci_register_cb);
2039
/* Remove a protocol callback set from the global HCI callback list. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Protect the global callback list against concurrent updates */
	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2051
2052static int hci_send_frame(struct sk_buff *skb)
2053{
2054 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2055
2056 if (!hdev) {
2057 kfree_skb(skb);
2058 return -ENODEV;
2059 }
2060
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002061 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002063 /* Time stamp */
2064 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002066 /* Send copy to monitor */
2067 hci_send_to_monitor(hdev, skb);
2068
2069 if (atomic_read(&hdev->promisc)) {
2070 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002071 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 }
2073
2074 /* Get rid of skb owner, prior to sending to the driver. */
2075 skb_orphan(skb);
2076
2077 return hdev->send(skb);
2078}
2079
2080/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002081int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082{
2083 int len = HCI_COMMAND_HDR_SIZE + plen;
2084 struct hci_command_hdr *hdr;
2085 struct sk_buff *skb;
2086
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002087 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088
2089 skb = bt_skb_alloc(len, GFP_ATOMIC);
2090 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02002091 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 return -ENOMEM;
2093 }
2094
2095 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002096 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 hdr->plen = plen;
2098
2099 if (plen)
2100 memcpy(skb_put(skb, plen), param, plen);
2101
2102 BT_DBG("skb len %d", skb->len);
2103
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002104 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002106
Johan Hedberga5040ef2011-01-10 13:28:59 +02002107 if (test_bit(HCI_INIT, &hdev->flags))
2108 hdev->init_last_cmd = opcode;
2109
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002111 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
2113 return 0;
2114}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115
2116/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002117void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118{
2119 struct hci_command_hdr *hdr;
2120
2121 if (!hdev->sent_cmd)
2122 return NULL;
2123
2124 hdr = (void *) hdev->sent_cmd->data;
2125
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002126 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 return NULL;
2128
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002129 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
2131 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2132}
2133
2134/* Send ACL data */
2135static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2136{
2137 struct hci_acl_hdr *hdr;
2138 int len = skb->len;
2139
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002140 skb_push(skb, HCI_ACL_HDR_SIZE);
2141 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002142 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002143 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2144 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145}
2146
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002147static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002148 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149{
2150 struct hci_dev *hdev = conn->hdev;
2151 struct sk_buff *list;
2152
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002153 skb->len = skb_headlen(skb);
2154 skb->data_len = 0;
2155
2156 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2157 hci_add_acl_hdr(skb, conn->handle, flags);
2158
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002159 list = skb_shinfo(skb)->frag_list;
2160 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 /* Non fragmented */
2162 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2163
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002164 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 } else {
2166 /* Fragmented */
2167 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2168
2169 skb_shinfo(skb)->frag_list = NULL;
2170
2171 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002172 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002174 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002175
2176 flags &= ~ACL_START;
2177 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 do {
2179 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002180
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002182 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002183 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
2185 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2186
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002187 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 } while (list);
2189
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002190 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002192}
2193
/* Queue ACL data on a channel and kick the TX work to send it. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header on the stack, then copy it in front of
	 * the payload once headroom has been pushed.
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue on the connection and kick the TX work */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229
2230/* ---- HCI TX task (outgoing data) ---- */
2231
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data that has the fewest
 * unacked packets in flight, and compute its fair-share quote of the
 * available controller buffers. Returns NULL (and *quote = 0) when no
 * connection is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Favor the connection with the least data in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer budget depends on the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the budget across ready connections, minimum 1 */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2292
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002293static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294{
2295 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002296 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
Ville Tervobae1f5d92011-02-10 22:38:53 -03002298 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002300 rcu_read_lock();
2301
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002303 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002304 if (c->type == type && c->sent) {
2305 BT_ERR("%s killing stalled connection %s",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002306 hdev->name, batostr(&c->dst));
Andrei Emeltchenko7490c6c2012-06-01 16:18:25 +03002307 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 }
2309 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002310
2311 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312}
2313
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002314static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2315 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002316{
2317 struct hci_conn_hash *h = &hdev->conn_hash;
2318 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02002319 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002320 struct hci_conn *conn;
2321 int cnt, q, conn_num = 0;
2322
2323 BT_DBG("%s", hdev->name);
2324
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002325 rcu_read_lock();
2326
2327 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002328 struct hci_chan *tmp;
2329
2330 if (conn->type != type)
2331 continue;
2332
2333 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2334 continue;
2335
2336 conn_num++;
2337
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002338 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002339 struct sk_buff *skb;
2340
2341 if (skb_queue_empty(&tmp->data_q))
2342 continue;
2343
2344 skb = skb_peek(&tmp->data_q);
2345 if (skb->priority < cur_prio)
2346 continue;
2347
2348 if (skb->priority > cur_prio) {
2349 num = 0;
2350 min = ~0;
2351 cur_prio = skb->priority;
2352 }
2353
2354 num++;
2355
2356 if (conn->sent < min) {
2357 min = conn->sent;
2358 chan = tmp;
2359 }
2360 }
2361
2362 if (hci_conn_num(hdev, type) == conn_num)
2363 break;
2364 }
2365
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002366 rcu_read_unlock();
2367
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002368 if (!chan)
2369 return NULL;
2370
2371 switch (chan->conn->type) {
2372 case ACL_LINK:
2373 cnt = hdev->acl_cnt;
2374 break;
2375 case SCO_LINK:
2376 case ESCO_LINK:
2377 cnt = hdev->sco_cnt;
2378 break;
2379 case LE_LINK:
2380 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2381 break;
2382 default:
2383 cnt = 0;
2384 BT_ERR("Unknown link type");
2385 }
2386
2387 q = cnt / num;
2388 *quote = q ? q : 1;
2389 BT_DBG("chan %p quote %d", chan, *quote);
2390 return chan;
2391}
2392
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002393static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2394{
2395 struct hci_conn_hash *h = &hdev->conn_hash;
2396 struct hci_conn *conn;
2397 int num = 0;
2398
2399 BT_DBG("%s", hdev->name);
2400
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002401 rcu_read_lock();
2402
2403 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002404 struct hci_chan *chan;
2405
2406 if (conn->type != type)
2407 continue;
2408
2409 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2410 continue;
2411
2412 num++;
2413
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002414 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002415 struct sk_buff *skb;
2416
2417 if (chan->sent) {
2418 chan->sent = 0;
2419 continue;
2420 }
2421
2422 if (skb_queue_empty(&chan->data_q))
2423 continue;
2424
2425 skb = skb_peek(&chan->data_q);
2426 if (skb->priority >= HCI_PRIO_MAX - 1)
2427 continue;
2428
2429 skb->priority = HCI_PRIO_MAX - 1;
2430
2431 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002432 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002433 }
2434
2435 if (hci_conn_num(hdev, type) == num)
2436 break;
2437 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002438
2439 rcu_read_unlock();
2440
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002441}
2442
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002443static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2444{
2445 /* Calculate count of blocks used by this packet */
2446 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2447}
2448
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002449static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 if (!test_bit(HCI_RAW, &hdev->flags)) {
2452 /* ACL tx timeout must be longer than maximum
2453 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002454 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002455 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002456 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002458}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459
/* Packet-based ACL scheduler: while ACL credits remain, pick the most
 * deserving channel (via hci_chan_sent) and transmit up to its quota,
 * stopping early when a lower-priority packet reaches the queue head.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;	/* credits on entry */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Complain if the controller has been out of credits too long */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Leave sniff mode before transmitting if needed */
			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2497
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002498static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002499{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002500 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002501 struct hci_chan *chan;
2502 struct sk_buff *skb;
2503 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002504
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002505 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002506
2507 while (hdev->block_cnt > 0 &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002508 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002509 u32 priority = (skb_peek(&chan->data_q))->priority;
2510 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2511 int blocks;
2512
2513 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002514 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002515
2516 /* Stop if priority has changed */
2517 if (skb->priority < priority)
2518 break;
2519
2520 skb = skb_dequeue(&chan->data_q);
2521
2522 blocks = __get_blocks(hdev, skb);
2523 if (blocks > hdev->block_cnt)
2524 return;
2525
2526 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002527 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002528
2529 hci_send_frame(skb);
2530 hdev->acl_last_tx = jiffies;
2531
2532 hdev->block_cnt -= blocks;
2533 quote -= blocks;
2534
2535 chan->sent += blocks;
2536 chan->conn->sent += blocks;
2537 }
2538 }
2539
2540 if (cnt != hdev->block_cnt)
2541 hci_prio_recalculate(hdev, ACL_LINK);
2542}
2543
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002544static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002545{
2546 BT_DBG("%s", hdev->name);
2547
2548 if (!hci_conn_num(hdev, ACL_LINK))
2549 return;
2550
2551 switch (hdev->flow_ctl_mode) {
2552 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2553 hci_sched_acl_pkt(hdev);
2554 break;
2555
2556 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2557 hci_sched_acl_blk(hdev);
2558 break;
2559 }
2560}
2561
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002563static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564{
2565 struct hci_conn *conn;
2566 struct sk_buff *skb;
2567 int quote;
2568
2569 BT_DBG("%s", hdev->name);
2570
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002571 if (!hci_conn_num(hdev, SCO_LINK))
2572 return;
2573
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2575 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2576 BT_DBG("skb %p len %d", skb, skb->len);
2577 hci_send_frame(skb);
2578
2579 conn->sent++;
2580 if (conn->sent == ~0)
2581 conn->sent = 0;
2582 }
2583 }
2584}
2585
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002586static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002587{
2588 struct hci_conn *conn;
2589 struct sk_buff *skb;
2590 int quote;
2591
2592 BT_DBG("%s", hdev->name);
2593
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002594 if (!hci_conn_num(hdev, ESCO_LINK))
2595 return;
2596
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002597 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2598 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002599 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2600 BT_DBG("skb %p len %d", skb, skb->len);
2601 hci_send_frame(skb);
2602
2603 conn->sent++;
2604 if (conn->sent == ~0)
2605 conn->sent = 0;
2606 }
2607 }
2608}
2609
/* LE scheduler: like the packet-based ACL scheduler, but draws on the
 * dedicated LE buffer pool when the controller has one (le_pkts != 0)
 * and shares the ACL pool otherwise.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE pool if present, else ACL */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember entry value to detect progress below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2660
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002661static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002663 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664 struct sk_buff *skb;
2665
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002666 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002667 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668
2669 /* Schedule queues and send stuff to HCI driver */
2670
2671 hci_sched_acl(hdev);
2672
2673 hci_sched_sco(hdev);
2674
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002675 hci_sched_esco(hdev);
2676
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002677 hci_sched_le(hdev);
2678
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 /* Send next queued raw (unknown type) packet */
2680 while ((skb = skb_dequeue(&hdev->raw_q)))
2681 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682}
2683
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002684/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685
/* ACL data packet: strip the transport header, look up the owning
 * connection and pass the payload up to L2CAP.  Unknown handles are
 * logged and the skb freed. */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	/* Advance skb->data past the ACL header to the payload */
	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the connection handle and the
	 * packet-boundary/broadcast flags; split them apart. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Tell the management interface about this connection the
		 * first time traffic is seen on it (the bit test-and-set
		 * makes the notification one-shot). */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2729
2730/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002731static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732{
2733 struct hci_sco_hdr *hdr = (void *) skb->data;
2734 struct hci_conn *conn;
2735 __u16 handle;
2736
2737 skb_pull(skb, HCI_SCO_HDR_SIZE);
2738
2739 handle = __le16_to_cpu(hdr->handle);
2740
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002741 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742
2743 hdev->stat.sco_rx++;
2744
2745 hci_dev_lock(hdev);
2746 conn = hci_conn_hash_lookup_handle(hdev, handle);
2747 hci_dev_unlock(hdev);
2748
2749 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002751 sco_recv_scodata(conn, skb);
2752 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002754 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002755 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 }
2757
2758 kfree_skb(skb);
2759}
2760
/* RX work: drain hdev->rx_q, mirroring each packet to the monitor and
 * (in promiscuous mode) to raw sockets, then dispatch it to the right
 * handler by packet type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw device: user space handles the traffic itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown type: nothing can consume it, free it */
			kfree_skb(skb);
			break;
		}
	}
}
2815
/* CMD work: when the controller has a free command credit, send the
 * next queued HCI command and (re)arm the command timeout timer. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the copy kept for the previous command's response
		 * handler before keeping a clone of the new one. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During HCI_RESET the timeout is stopped rather
			 * than rearmed - NOTE(review): presumably because
			 * a resetting controller may take longer; confirm
			 * against the reset handling elsewhere. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002846
2847int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2848{
2849 /* General inquiry access code (GIAC) */
2850 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2851 struct hci_cp_inquiry cp;
2852
2853 BT_DBG("%s", hdev->name);
2854
2855 if (test_bit(HCI_INQUIRY, &hdev->flags))
2856 return -EINPROGRESS;
2857
Johan Hedberg46632622012-01-02 16:06:08 +02002858 inquiry_cache_flush(hdev);
2859
Andre Guedes2519a1f2011-11-07 11:45:24 -03002860 memset(&cp, 0, sizeof(cp));
2861 memcpy(&cp.lap, lap, sizeof(cp.lap));
2862 cp.length = length;
2863
2864 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2865}
Andre Guedes023d50492011-11-04 14:16:52 -03002866
2867int hci_cancel_inquiry(struct hci_dev *hdev)
2868{
2869 BT_DBG("%s", hdev->name);
2870
2871 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002872 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002873
2874 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2875}
Andre Guedes31f79562012-04-24 21:02:53 -03002876
2877u8 bdaddr_to_le(u8 bdaddr_type)
2878{
2879 switch (bdaddr_type) {
2880 case BDADDR_LE_PUBLIC:
2881 return ADDR_LE_DEV_PUBLIC;
2882
2883 default:
2884 /* Fallback to LE Random address type */
2885 return ADDR_LE_DEV_RANDOM;
2886 }
2887}