/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

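/* Complete a pending synchronous request: record the result and wake up
 * the thread sleeping in __hci_request(). During the init phase, a
 * completion that does not match the last init command gets the quirk
 * handling below instead.
 */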
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

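/* Abort a pending synchronous request, waking the waiter with the given
 * error (e.g. ENODEV when the device is going down).
 */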
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

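/* Same as __hci_request(), but checks that the device is up and holds
 * the request lock so that requests are serialized.
 */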
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

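/* Queue the basic init commands for a BR/EDR controller, which uses
 * packet-based flow control.
 */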
static void bredr_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

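/* Queue the basic init commands for an AMP controller, which uses
 * block-based flow control and additionally needs its AMP info and
 * data block size.
 */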
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

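/* Move the discovery state machine to a new state, notifying userspace
 * via mgmt when discovery effectively starts or stops.
 */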
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

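/* Re-insert an entry into the resolve list, keeping the list ordered by
 * ascending |RSSI| (strongest signal first) so that names of nearby
 * devices are resolved first. Entries whose name resolution is already
 * pending are skipped over and stay at the front.
 */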
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

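/* Add or refresh an inquiry cache entry. Returns false if the device
 * name is still unknown (the entry sits on the unknown list awaiting
 * resolution), true otherwise.
 */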
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

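/* HCIINQUIRY ioctl handler: run an inquiry if the cache is stale (or a
 * flush was requested) and copy the cached results back to userspace.
 */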
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

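/* Bring the device up: open the transport, run the HCI init sequence
 * (unless the device is marked raw) and notify listeners. On init
 * failure everything is torn down again.
 */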
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

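/* Bring the device down: cancel pending work, flush queues and caches,
 * optionally reset the controller, then close the transport.
 */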
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

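/* Deferred power-on worker: open the device, arm the auto power-off
 * timer if the device was powered on automatically, and announce a
 * newly set-up controller to mgmt.
 */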
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

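/* Decide whether a new link key should be stored persistently, based on
 * the key type and the bonding requirements of both sides of the
 * connection.
 */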
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

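/* Store (or update) a BR/EDR link key, working around controllers that
 * report a changed combination key for legacy pairing, and notify mgmt
 * when a genuinely new key arrives.
 */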
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001196int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001197 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001198{
1199 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301200 u8 old_key_type;
1201 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001202
1203 old_key = hci_find_link_key(hdev, bdaddr);
1204 if (old_key) {
1205 old_key_type = old_key->type;
1206 key = old_key;
1207 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001208 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001209 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1210 if (!key)
1211 return -ENOMEM;
1212 list_add(&key->list, &hdev->link_keys);
1213 }
1214
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001215 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001216
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001217 /* Some buggy controller combinations generate a changed
1218 * combination key for legacy pairing even when there's no
1219 * previous key */
1220 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001221 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001222 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001223 if (conn)
1224 conn->key_type = type;
1225 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001226
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001227 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001228 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001229 key->pin_len = pin_len;
1230
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001231 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001232 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001233 else
1234 key->type = type;
1235
Johan Hedberg4df378a2011-04-28 11:29:03 -07001236 if (!new_key)
1237 return 0;
1238
1239 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1240
Johan Hedberg744cf192011-11-08 20:40:14 +02001241 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001242
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301243 if (conn)
1244 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001245
1246 return 0;
1247}
1248
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001249int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001250 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001251 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001252{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001253 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001254
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001255 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1256 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001257
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001258 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1259 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001260 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001261 else {
1262 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001263 if (!key)
1264 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001265 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001266 }
1267
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001268 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001269 key->bdaddr_type = addr_type;
1270 memcpy(key->val, tk, sizeof(key->val));
1271 key->authenticated = authenticated;
1272 key->ediv = ediv;
1273 key->enc_size = enc_size;
1274 key->type = type;
1275 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001276
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001277 if (!new_key)
1278 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001279
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001280 if (type & HCI_SMP_LTK)
1281 mgmt_new_ltk(hdev, key, 1);
1282
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001283 return 0;
1284}
1285
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001286int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1287{
1288 struct link_key *key;
1289
1290 key = hci_find_link_key(hdev, bdaddr);
1291 if (!key)
1292 return -ENOENT;
1293
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001294 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001295
1296 list_del(&key->list);
1297 kfree(key);
1298
1299 return 0;
1300}
1301
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001302int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1303{
1304 struct smp_ltk *k, *tmp;
1305
1306 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1307 if (bacmp(bdaddr, &k->bdaddr))
1308 continue;
1309
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001310 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001311
1312 list_del(&k->list);
1313 kfree(k);
1314 }
1315
1316 return 0;
1317}
1318
Ville Tervo6bd32322011-02-16 16:32:41 +02001319/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001320static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001321{
1322 struct hci_dev *hdev = (void *) arg;
1323
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001324 if (hdev->sent_cmd) {
1325 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1326 u16 opcode = __le16_to_cpu(sent->opcode);
1327
1328 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1329 } else {
1330 BT_ERR("%s command tx timeout", hdev->name);
1331 }
1332
Ville Tervo6bd32322011-02-16 16:32:41 +02001333 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001334 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001335}
1336
Szymon Janc2763eda2011-03-22 13:12:22 +01001337struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001338 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001339{
1340 struct oob_data *data;
1341
1342 list_for_each_entry(data, &hdev->remote_oob_data, list)
1343 if (bacmp(bdaddr, &data->bdaddr) == 0)
1344 return data;
1345
1346 return NULL;
1347}
1348
1349int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1350{
1351 struct oob_data *data;
1352
1353 data = hci_find_remote_oob_data(hdev, bdaddr);
1354 if (!data)
1355 return -ENOENT;
1356
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001357 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001358
1359 list_del(&data->list);
1360 kfree(data);
1361
1362 return 0;
1363}
1364
1365int hci_remote_oob_data_clear(struct hci_dev *hdev)
1366{
1367 struct oob_data *data, *n;
1368
1369 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1370 list_del(&data->list);
1371 kfree(data);
1372 }
1373
1374 return 0;
1375}
1376
1377int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001378 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001379{
1380 struct oob_data *data;
1381
1382 data = hci_find_remote_oob_data(hdev, bdaddr);
1383
1384 if (!data) {
1385 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1386 if (!data)
1387 return -ENOMEM;
1388
1389 bacpy(&data->bdaddr, bdaddr);
1390 list_add(&data->list, &hdev->remote_oob_data);
1391 }
1392
1393 memcpy(data->hash, hash, sizeof(data->hash));
1394 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1395
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001396 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001397
1398 return 0;
1399}
1400
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001401struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001402{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001403 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001404
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001405 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001406 if (bacmp(bdaddr, &b->bdaddr) == 0)
1407 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001408
1409 return NULL;
1410}
1411
1412int hci_blacklist_clear(struct hci_dev *hdev)
1413{
1414 struct list_head *p, *n;
1415
1416 list_for_each_safe(p, n, &hdev->blacklist) {
1417 struct bdaddr_list *b;
1418
1419 b = list_entry(p, struct bdaddr_list, list);
1420
1421 list_del(p);
1422 kfree(b);
1423 }
1424
1425 return 0;
1426}
1427
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001428int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001429{
1430 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001431
1432 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1433 return -EBADF;
1434
Antti Julku5e762442011-08-25 16:48:02 +03001435 if (hci_blacklist_lookup(hdev, bdaddr))
1436 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001437
1438 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001439 if (!entry)
1440 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001441
1442 bacpy(&entry->bdaddr, bdaddr);
1443
1444 list_add(&entry->list, &hdev->blacklist);
1445
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001446 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001447}
1448
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001449int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001450{
1451 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001452
Szymon Janc1ec918c2011-11-16 09:32:21 +01001453 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001454 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001455
1456 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001457 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001458 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001459
1460 list_del(&entry->list);
1461 kfree(entry);
1462
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001463 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001464}
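/*
 * Blacklist semantics: hci_blacklist_add() rejects BDADDR_ANY, while
 * hci_blacklist_del() treats BDADDR_ANY as a request to flush the
 * whole list. Both paths notify the management interface about the
 * change via mgmt_device_blocked()/mgmt_device_unblocked().
 */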
1465
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001466static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1467{
1468 struct le_scan_params *param = (struct le_scan_params *) opt;
1469 struct hci_cp_le_set_scan_param cp;
1470
1471 memset(&cp, 0, sizeof(cp));
1472 cp.type = param->type;
1473 cp.interval = cpu_to_le16(param->interval);
1474 cp.window = cpu_to_le16(param->window);
1475
1476 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1477}
1478
1479static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1480{
1481 struct hci_cp_le_set_scan_enable cp;
1482
1483 memset(&cp, 0, sizeof(cp));
1484 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001485 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001486
1487 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1488}
1489
1490static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001491 u16 window, int timeout)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001492{
1493 long timeo = msecs_to_jiffies(3000);
1494 struct le_scan_params param;
1495 int err;
1496
1497 BT_DBG("%s", hdev->name);
1498
1499 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1500 return -EINPROGRESS;
1501
1502 param.type = type;
1503 param.interval = interval;
1504 param.window = window;
1505
1506 hci_req_lock(hdev);
1507
1508 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001509 timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001510 if (!err)
1511 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1512
1513 hci_req_unlock(hdev);
1514
1515 if (err < 0)
1516 return err;
1517
1518 schedule_delayed_work(&hdev->le_scan_disable,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001519 msecs_to_jiffies(timeout));
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001520
1521 return 0;
1522}
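/*
 * hci_do_le_scan() runs synchronously under req_lock: it first
 * programs the scan parameters, then enables scanning with duplicate
 * filtering, and finally schedules le_scan_disable to turn the scan
 * off again once the requested timeout (in milliseconds) has elapsed.
 */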
1523
Andre Guedes7dbfac12012-03-15 16:52:07 -03001524int hci_cancel_le_scan(struct hci_dev *hdev)
1525{
1526 BT_DBG("%s", hdev->name);
1527
1528 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1529 return -EALREADY;
1530
1531 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1532 struct hci_cp_le_set_scan_enable cp;
1533
1534 /* Send HCI command to disable LE Scan */
1535 memset(&cp, 0, sizeof(cp));
1536 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1537 }
1538
1539 return 0;
1540}
1541
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001542static void le_scan_disable_work(struct work_struct *work)
1543{
1544 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001545 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001546 struct hci_cp_le_set_scan_enable cp;
1547
1548 BT_DBG("%s", hdev->name);
1549
1550 memset(&cp, 0, sizeof(cp));
1551
1552 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1553}
1554
Andre Guedes28b75a82012-02-03 17:48:00 -03001555static void le_scan_work(struct work_struct *work)
1556{
1557 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1558 struct le_scan_params *param = &hdev->le_scan_params;
1559
1560 BT_DBG("%s", hdev->name);
1561
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001562 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1563 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001564}
1565
1566int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001567 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001568{
1569 struct le_scan_params *param = &hdev->le_scan_params;
1570
1571 BT_DBG("%s", hdev->name);
1572
1573 if (work_busy(&hdev->le_scan))
1574 return -EINPROGRESS;
1575
1576 param->type = type;
1577 param->interval = interval;
1578 param->window = window;
1579 param->timeout = timeout;
1580
1581 queue_work(system_long_wq, &hdev->le_scan);
1582
1583 return 0;
1584}
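/*
 * Illustrative sketch (not part of the original file): starting a
 * background LE scan from process context. The interval/window values
 * are raw controller units as consumed by HCI_OP_LE_SET_SCAN_PARAM
 * (0.625 ms per unit per the Bluetooth specification); the numbers
 * below are arbitrary example values and the timeout is in ms.
 */
#if 0
	err = hci_le_scan(hdev, 0x01 /* active scan */,
			  0x0010, 0x0010, 10000 /* stop after 10 s */);
	if (err == -EINPROGRESS)
		; /* a scan is already queued or running */
#endif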
1585
David Herrmann9be0dab2012-04-22 14:39:57 +02001586/* Alloc HCI device */
1587struct hci_dev *hci_alloc_dev(void)
1588{
1589 struct hci_dev *hdev;
1590
1591 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1592 if (!hdev)
1593 return NULL;
1594
David Herrmannb1b813d2012-04-22 14:39:58 +02001595 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1596 hdev->esco_type = (ESCO_HV1);
1597 hdev->link_mode = (HCI_LM_ACCEPT);
1598 hdev->io_capability = 0x03; /* No Input No Output */
1599
David Herrmannb1b813d2012-04-22 14:39:58 +02001600 hdev->sniff_max_interval = 800;
1601 hdev->sniff_min_interval = 80;
1602
1603 mutex_init(&hdev->lock);
1604 mutex_init(&hdev->req_lock);
1605
1606 INIT_LIST_HEAD(&hdev->mgmt_pending);
1607 INIT_LIST_HEAD(&hdev->blacklist);
1608 INIT_LIST_HEAD(&hdev->uuids);
1609 INIT_LIST_HEAD(&hdev->link_keys);
1610 INIT_LIST_HEAD(&hdev->long_term_keys);
1611 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03001612 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02001613
1614 INIT_WORK(&hdev->rx_work, hci_rx_work);
1615 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1616 INIT_WORK(&hdev->tx_work, hci_tx_work);
1617 INIT_WORK(&hdev->power_on, hci_power_on);
1618 INIT_WORK(&hdev->le_scan, le_scan_work);
1619
David Herrmannb1b813d2012-04-22 14:39:58 +02001620 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1621 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1622 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1623
David Herrmann9be0dab2012-04-22 14:39:57 +02001624 skb_queue_head_init(&hdev->driver_init);
David Herrmannb1b813d2012-04-22 14:39:58 +02001625 skb_queue_head_init(&hdev->rx_q);
1626 skb_queue_head_init(&hdev->cmd_q);
1627 skb_queue_head_init(&hdev->raw_q);
1628
1629 init_waitqueue_head(&hdev->req_wait_q);
1630
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001631 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02001632
David Herrmannb1b813d2012-04-22 14:39:58 +02001633 hci_init_sysfs(hdev);
1634 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02001635
1636 return hdev;
1637}
1638EXPORT_SYMBOL(hci_alloc_dev);
1639
1640/* Free HCI device */
1641void hci_free_dev(struct hci_dev *hdev)
1642{
1643 skb_queue_purge(&hdev->driver_init);
1644
1645 /* will free via device release */
1646 put_device(&hdev->dev);
1647}
1648EXPORT_SYMBOL(hci_free_dev);
1649
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650/* Register HCI device */
1651int hci_register_dev(struct hci_dev *hdev)
1652{
David Herrmannb1b813d2012-04-22 14:39:58 +02001653 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
David Herrmann010666a2012-01-07 15:47:07 +01001655 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 return -EINVAL;
1657
Mat Martineau08add512011-11-02 16:18:36 -07001658 /* Do not allow HCI_AMP devices to register at index 0,
1659 * so the index can be used as the AMP controller ID.
1660 */
Sasha Levin3df92b32012-05-27 22:36:56 +02001661 switch (hdev->dev_type) {
1662 case HCI_BREDR:
1663 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1664 break;
1665 case HCI_AMP:
1666 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1667 break;
1668 default:
1669 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001671
Sasha Levin3df92b32012-05-27 22:36:56 +02001672 if (id < 0)
1673 return id;
1674
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 sprintf(hdev->name, "hci%d", id);
1676 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03001677
1678 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1679
Sasha Levin3df92b32012-05-27 22:36:56 +02001680 write_lock(&hci_dev_list_lock);
1681 list_add(&hdev->list, &hci_dev_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001682 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001684 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001685 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001686 if (!hdev->workqueue) {
1687 error = -ENOMEM;
1688 goto err;
1689 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001690
David Herrmann33ca9542011-10-08 14:58:49 +02001691 error = hci_add_sysfs(hdev);
1692 if (error < 0)
1693 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001695 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001696 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1697 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001698 if (hdev->rfkill) {
1699 if (rfkill_register(hdev->rfkill) < 0) {
1700 rfkill_destroy(hdev->rfkill);
1701 hdev->rfkill = NULL;
1702 }
1703 }
1704
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001705 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03001706
1707 if (hdev->dev_type != HCI_AMP)
1708 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1709
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001710 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001711
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01001713 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714
1715 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001716
David Herrmann33ca9542011-10-08 14:58:49 +02001717err_wqueue:
1718 destroy_workqueue(hdev->workqueue);
1719err:
Sasha Levin3df92b32012-05-27 22:36:56 +02001720 ida_simple_remove(&hci_index_ida, hdev->id);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001721 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001722 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001723 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001724
David Herrmann33ca9542011-10-08 14:58:49 +02001725 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726}
1727EXPORT_SYMBOL(hci_register_dev);
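/*
 * Illustrative sketch (not part of the original file): the minimal
 * contract a transport driver has to satisfy before registering. The
 * my_* identifiers are hypothetical placeholders; hci_register_dev()
 * itself only insists on the open and close callbacks, but send is
 * needed as soon as hci_send_frame() runs.
 */
#if 0
static int my_probe(void)
{
	struct hci_dev *hdev;
	int id;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->open  = my_open;		/* hypothetical callbacks */
	hdev->close = my_close;
	hdev->send  = my_send;

	id = hci_register_dev(hdev);
	if (id < 0) {
		hci_free_dev(hdev);
		return id;
	}

	return 0;
}
#endif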
1728
1729/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02001730void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731{
Sasha Levin3df92b32012-05-27 22:36:56 +02001732 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02001733
Marcel Holtmannc13854c2010-02-08 15:27:07 +01001734 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735
Johan Hovold94324962012-03-15 14:48:41 +01001736 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1737
Sasha Levin3df92b32012-05-27 22:36:56 +02001738 id = hdev->id;
1739
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001740 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001742 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743
1744 hci_dev_do_close(hdev);
1745
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301746 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001747 kfree_skb(hdev->reassembly[i]);
1748
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001749 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001750 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001751 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001752 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001753 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001754 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001755
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001756 /* mgmt_index_removed should take care of emptying the
1757 * pending list */
1758 BUG_ON(!list_empty(&hdev->mgmt_pending));
1759
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 hci_notify(hdev, HCI_DEV_UNREG);
1761
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001762 if (hdev->rfkill) {
1763 rfkill_unregister(hdev->rfkill);
1764 rfkill_destroy(hdev->rfkill);
1765 }
1766
David Herrmannce242972011-10-08 14:58:48 +02001767 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08001768
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001769 destroy_workqueue(hdev->workqueue);
1770
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001771 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02001772 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001773 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001774 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001775 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01001776 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001777 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02001778
David Herrmanndc946bd2012-01-07 15:47:24 +01001779 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02001780
1781 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782}
1783EXPORT_SYMBOL(hci_unregister_dev);
1784
1785/* Suspend HCI device */
1786int hci_suspend_dev(struct hci_dev *hdev)
1787{
1788 hci_notify(hdev, HCI_DEV_SUSPEND);
1789 return 0;
1790}
1791EXPORT_SYMBOL(hci_suspend_dev);
1792
1793/* Resume HCI device */
1794int hci_resume_dev(struct hci_dev *hdev)
1795{
1796 hci_notify(hdev, HCI_DEV_RESUME);
1797 return 0;
1798}
1799EXPORT_SYMBOL(hci_resume_dev);
1800
Marcel Holtmann76bca882009-11-18 00:40:39 +01001801/* Receive frame from HCI drivers */
1802int hci_recv_frame(struct sk_buff *skb)
1803{
1804 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1805 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001806 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001807 kfree_skb(skb);
1808 return -ENXIO;
1809 }
1810
1811 /* Incoming skb */
1812 bt_cb(skb)->incoming = 1;
1813
1814 /* Time stamp */
1815 __net_timestamp(skb);
1816
Marcel Holtmann76bca882009-11-18 00:40:39 +01001817 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001818 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001819
Marcel Holtmann76bca882009-11-18 00:40:39 +01001820 return 0;
1821}
1822EXPORT_SYMBOL(hci_recv_frame);
1823
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301824static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001825 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301826{
1827 int len = 0;
1828 int hlen = 0;
1829 int remain = count;
1830 struct sk_buff *skb;
1831 struct bt_skb_cb *scb;
1832
1833 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001834 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301835 return -EILSEQ;
1836
1837 skb = hdev->reassembly[index];
1838
1839 if (!skb) {
1840 switch (type) {
1841 case HCI_ACLDATA_PKT:
1842 len = HCI_MAX_FRAME_SIZE;
1843 hlen = HCI_ACL_HDR_SIZE;
1844 break;
1845 case HCI_EVENT_PKT:
1846 len = HCI_MAX_EVENT_SIZE;
1847 hlen = HCI_EVENT_HDR_SIZE;
1848 break;
1849 case HCI_SCODATA_PKT:
1850 len = HCI_MAX_SCO_SIZE;
1851 hlen = HCI_SCO_HDR_SIZE;
1852 break;
1853 }
1854
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001855 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301856 if (!skb)
1857 return -ENOMEM;
1858
1859 scb = (void *) skb->cb;
1860 scb->expect = hlen;
1861 scb->pkt_type = type;
1862
1863 skb->dev = (void *) hdev;
1864 hdev->reassembly[index] = skb;
1865 }
1866
1867 while (count) {
1868 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03001869 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301870
1871 memcpy(skb_put(skb, len), data, len);
1872
1873 count -= len;
1874 data += len;
1875 scb->expect -= len;
1876 remain = count;
1877
1878 switch (type) {
1879 case HCI_EVENT_PKT:
1880 if (skb->len == HCI_EVENT_HDR_SIZE) {
1881 struct hci_event_hdr *h = hci_event_hdr(skb);
1882 scb->expect = h->plen;
1883
1884 if (skb_tailroom(skb) < scb->expect) {
1885 kfree_skb(skb);
1886 hdev->reassembly[index] = NULL;
1887 return -ENOMEM;
1888 }
1889 }
1890 break;
1891
1892 case HCI_ACLDATA_PKT:
1893 if (skb->len == HCI_ACL_HDR_SIZE) {
1894 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1895 scb->expect = __le16_to_cpu(h->dlen);
1896
1897 if (skb_tailroom(skb) < scb->expect) {
1898 kfree_skb(skb);
1899 hdev->reassembly[index] = NULL;
1900 return -ENOMEM;
1901 }
1902 }
1903 break;
1904
1905 case HCI_SCODATA_PKT:
1906 if (skb->len == HCI_SCO_HDR_SIZE) {
1907 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1908 scb->expect = h->dlen;
1909
1910 if (skb_tailroom(skb) < scb->expect) {
1911 kfree_skb(skb);
1912 hdev->reassembly[index] = NULL;
1913 return -ENOMEM;
1914 }
1915 }
1916 break;
1917 }
1918
1919 if (scb->expect == 0) {
1920 /* Complete frame */
1921
1922 bt_cb(skb)->pkt_type = type;
1923 hci_recv_frame(skb);
1924
1925 hdev->reassembly[index] = NULL;
1926 return remain;
1927 }
1928 }
1929
1930 return remain;
1931}
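/*
 * hci_reassembly() keeps one partially assembled packet per slot in
 * hdev->reassembly[]. For a fresh slot it allocates an skb sized for
 * the worst case of the packet type, copies bytes until the header is
 * complete, re-reads the expected payload length from that header, and
 * finally hands the finished frame to hci_recv_frame(). The return
 * value is the number of input bytes left unconsumed.
 */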
1932
Marcel Holtmannef222012007-07-11 06:42:04 +02001933int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1934{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301935 int rem = 0;
1936
Marcel Holtmannef222012007-07-11 06:42:04 +02001937 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1938 return -EILSEQ;
1939
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001940 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001941 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301942 if (rem < 0)
1943 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001944
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301945 data += (count - rem);
1946 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001947 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001948
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301949 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001950}
1951EXPORT_SYMBOL(hci_recv_fragment);
1952
Suraj Sumangala99811512010-07-14 13:02:19 +05301953#define STREAM_REASSEMBLY 0
1954
1955int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1956{
1957 int type;
1958 int rem = 0;
1959
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001960 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301961 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1962
1963 if (!skb) {
1964 struct { char type; } *pkt;
1965
1966 /* Start of the frame */
1967 pkt = data;
1968 type = pkt->type;
1969
1970 data++;
1971 count--;
1972 } else
1973 type = bt_cb(skb)->pkt_type;
1974
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001975 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001976 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301977 if (rem < 0)
1978 return rem;
1979
1980 data += (count - rem);
1981 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001982 }
Suraj Sumangala99811512010-07-14 13:02:19 +05301983
1984 return rem;
1985}
1986EXPORT_SYMBOL(hci_recv_stream_fragment);
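/*
 * Illustrative sketch (not part of the original file): a byte-stream
 * transport such as a UART line discipline would feed raw bytes here
 * and let the core recover packet boundaries. rx_buf and rx_len are
 * hypothetical driver-side names.
 */
#if 0
	ret = hci_recv_stream_fragment(hdev, rx_buf, rx_len);
	if (ret < 0)
		BT_ERR("Frame reassembly failed (%d)", ret);
#endif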
1987
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988/* ---- Interface to upper protocols ---- */
1989
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990int hci_register_cb(struct hci_cb *cb)
1991{
1992 BT_DBG("%p name %s", cb, cb->name);
1993
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001994 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001996 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
1998 return 0;
1999}
2000EXPORT_SYMBOL(hci_register_cb);
2001
2002int hci_unregister_cb(struct hci_cb *cb)
2003{
2004 BT_DBG("%p name %s", cb, cb->name);
2005
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002006 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002008 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009
2010 return 0;
2011}
2012EXPORT_SYMBOL(hci_unregister_cb);
2013
2014static int hci_send_frame(struct sk_buff *skb)
2015{
2016 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2017
2018 if (!hdev) {
2019 kfree_skb(skb);
2020 return -ENODEV;
2021 }
2022
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002023 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002025 /* Time stamp */
2026 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002028 /* Send copy to monitor */
2029 hci_send_to_monitor(hdev, skb);
2030
2031 if (atomic_read(&hdev->promisc)) {
2032 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002033 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 }
2035
2036 /* Get rid of skb owner prior to sending to the driver. */
2037 skb_orphan(skb);
2038
2039 return hdev->send(skb);
2040}
2041
2042/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002043int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044{
2045 int len = HCI_COMMAND_HDR_SIZE + plen;
2046 struct hci_command_hdr *hdr;
2047 struct sk_buff *skb;
2048
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002049 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050
2051 skb = bt_skb_alloc(len, GFP_ATOMIC);
2052 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02002053 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 return -ENOMEM;
2055 }
2056
2057 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002058 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 hdr->plen = plen;
2060
2061 if (plen)
2062 memcpy(skb_put(skb, plen), param, plen);
2063
2064 BT_DBG("skb len %d", skb->len);
2065
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002066 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002068
Johan Hedberga5040ef2011-01-10 13:28:59 +02002069 if (test_bit(HCI_INIT, &hdev->flags))
2070 hdev->init_last_cmd = opcode;
2071
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002073 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074
2075 return 0;
2076}
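/*
 * Commands are never handed to the driver directly: hci_send_cmd()
 * only queues the skb on cmd_q and wakes cmd_work, which throttles
 * transmission via cmd_cnt so that a new command is only sent once the
 * controller has acknowledged the previous one.
 */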
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077
2078/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002079void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080{
2081 struct hci_command_hdr *hdr;
2082
2083 if (!hdev->sent_cmd)
2084 return NULL;
2085
2086 hdr = (void *) hdev->sent_cmd->data;
2087
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002088 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 return NULL;
2090
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002091 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092
2093 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2094}
2095
2096/* Send ACL data */
2097static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2098{
2099 struct hci_acl_hdr *hdr;
2100 int len = skb->len;
2101
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002102 skb_push(skb, HCI_ACL_HDR_SIZE);
2103 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002104 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002105 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2106 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107}
2108
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002109static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002110 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002112 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 struct hci_dev *hdev = conn->hdev;
2114 struct sk_buff *list;
2115
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002116 skb->len = skb_headlen(skb);
2117 skb->data_len = 0;
2118
2119 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002120
2121 switch (hdev->dev_type) {
2122 case HCI_BREDR:
2123 hci_add_acl_hdr(skb, conn->handle, flags);
2124 break;
2125 case HCI_AMP:
2126 hci_add_acl_hdr(skb, chan->handle, flags);
2127 break;
2128 default:
2129 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2130 return;
2131 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002132
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002133 list = skb_shinfo(skb)->frag_list;
2134 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 /* Non-fragmented */
2136 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2137
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002138 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 } else {
2140 /* Fragmented */
2141 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2142
2143 skb_shinfo(skb)->frag_list = NULL;
2144
2145 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002146 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002148 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002149
2150 flags &= ~ACL_START;
2151 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 do {
2153 skb = list;
 list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002154
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002156 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002157 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
2159 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2160
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002161 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 } while (list);
2163
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002164 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002166}
2167
2168void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2169{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002170 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002171
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002172 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002173
2174 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002175
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002176 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002178 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
2181/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002182void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183{
2184 struct hci_dev *hdev = conn->hdev;
2185 struct hci_sco_hdr hdr;
2186
2187 BT_DBG("%s len %d", hdev->name, skb->len);
2188
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002189 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 hdr.dlen = skb->len;
2191
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002192 skb_push(skb, HCI_SCO_HDR_SIZE);
2193 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002194 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195
2196 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002197 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002198
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002200 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202
2203/* ---- HCI TX task (outgoing data) ---- */
2204
2205/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002206static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2207 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208{
2209 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002210 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002211 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002213 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002215
2216 rcu_read_lock();
2217
2218 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002219 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002221
2222 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2223 continue;
2224
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 num++;
2226
2227 if (c->sent < min) {
2228 min = c->sent;
2229 conn = c;
2230 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002231
2232 if (hci_conn_num(hdev, type) == num)
2233 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 }
2235
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002236 rcu_read_unlock();
2237
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002239 int cnt, q;
2240
2241 switch (conn->type) {
2242 case ACL_LINK:
2243 cnt = hdev->acl_cnt;
2244 break;
2245 case SCO_LINK:
2246 case ESCO_LINK:
2247 cnt = hdev->sco_cnt;
2248 break;
2249 case LE_LINK:
2250 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2251 break;
2252 default:
2253 cnt = 0;
2254 BT_ERR("Unknown link type");
2255 }
2256
2257 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 *quote = q ? q : 1;
2259 } else
2260 *quote = 0;
2261
2262 BT_DBG("conn %p quote %d", conn, *quote);
2263 return conn;
2264}
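/*
 * hci_low_sent() implements a simple fairness policy: among all
 * connections of the given type with queued data, pick the one with
 * the fewest packets in flight and grant it an equal share of the
 * controller buffers (cnt / num, minimum 1) as its quote.
 */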
2265
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002266static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267{
2268 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002269 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270
Ville Tervobae1f5d92011-02-10 22:38:53 -03002271 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002273 rcu_read_lock();
2274
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002276 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002277 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002278 BT_ERR("%s killing stalled connection %pMR",
2279 hdev->name, &c->dst);
Andrei Emeltchenko7490c6c2012-06-01 16:18:25 +03002280 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 }
2282 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002283
2284 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285}
2286
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002287static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2288 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002289{
2290 struct hci_conn_hash *h = &hdev->conn_hash;
2291 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02002292 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002293 struct hci_conn *conn;
2294 int cnt, q, conn_num = 0;
2295
2296 BT_DBG("%s", hdev->name);
2297
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002298 rcu_read_lock();
2299
2300 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002301 struct hci_chan *tmp;
2302
2303 if (conn->type != type)
2304 continue;
2305
2306 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2307 continue;
2308
2309 conn_num++;
2310
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002311 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002312 struct sk_buff *skb;
2313
2314 if (skb_queue_empty(&tmp->data_q))
2315 continue;
2316
2317 skb = skb_peek(&tmp->data_q);
2318 if (skb->priority < cur_prio)
2319 continue;
2320
2321 if (skb->priority > cur_prio) {
2322 num = 0;
2323 min = ~0;
2324 cur_prio = skb->priority;
2325 }
2326
2327 num++;
2328
2329 if (conn->sent < min) {
2330 min = conn->sent;
2331 chan = tmp;
2332 }
2333 }
2334
2335 if (hci_conn_num(hdev, type) == conn_num)
2336 break;
2337 }
2338
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002339 rcu_read_unlock();
2340
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002341 if (!chan)
2342 return NULL;
2343
2344 switch (chan->conn->type) {
2345 case ACL_LINK:
2346 cnt = hdev->acl_cnt;
2347 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002348 case AMP_LINK:
2349 cnt = hdev->block_cnt;
2350 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002351 case SCO_LINK:
2352 case ESCO_LINK:
2353 cnt = hdev->sco_cnt;
2354 break;
2355 case LE_LINK:
2356 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2357 break;
2358 default:
2359 cnt = 0;
2360 BT_ERR("Unknown link type");
2361 }
2362
2363 q = cnt / num;
2364 *quote = q ? q : 1;
2365 BT_DBG("chan %p quote %d", chan, *quote);
2366 return chan;
2367}
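/*
 * hci_chan_sent() is the priority-aware variant used by the packet
 * schedulers: it only considers channels whose head-of-queue skb
 * carries the highest priority currently waiting, and among those
 * again picks the connection with the fewest packets in flight.
 */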
2368
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002369static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2370{
2371 struct hci_conn_hash *h = &hdev->conn_hash;
2372 struct hci_conn *conn;
2373 int num = 0;
2374
2375 BT_DBG("%s", hdev->name);
2376
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002377 rcu_read_lock();
2378
2379 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002380 struct hci_chan *chan;
2381
2382 if (conn->type != type)
2383 continue;
2384
2385 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2386 continue;
2387
2388 num++;
2389
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002390 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002391 struct sk_buff *skb;
2392
2393 if (chan->sent) {
2394 chan->sent = 0;
2395 continue;
2396 }
2397
2398 if (skb_queue_empty(&chan->data_q))
2399 continue;
2400
2401 skb = skb_peek(&chan->data_q);
2402 if (skb->priority >= HCI_PRIO_MAX - 1)
2403 continue;
2404
2405 skb->priority = HCI_PRIO_MAX - 1;
2406
2407 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002408 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002409 }
2410
2411 if (hci_conn_num(hdev, type) == num)
2412 break;
2413 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002414
2415 rcu_read_unlock();
2416
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002417}
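/*
 * hci_prio_recalculate() prevents starvation: any channel that got no
 * packets out in the last scheduling round has the priority of its
 * head-of-queue skb promoted towards HCI_PRIO_MAX - 1, so low-priority
 * traffic eventually wins a round against busy high-priority channels.
 */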
2418
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002419static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2420{
2421 /* Calculate count of blocks used by this packet */
2422 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2423}
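/*
 * Worked example: with hdev->block_len = 64, a 260-byte skb (256 bytes
 * of payload behind the 4-byte ACL header) occupies
 * DIV_ROUND_UP(260 - HCI_ACL_HDR_SIZE, 64) = 4 controller blocks.
 */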
2424
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002425static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 if (!test_bit(HCI_RAW, &hdev->flags)) {
2428 /* ACL tx timeout must be longer than maximum
2429 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002430 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002431 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002432 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002434}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002436static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002437{
2438 unsigned int cnt = hdev->acl_cnt;
2439 struct hci_chan *chan;
2440 struct sk_buff *skb;
2441 int quote;
2442
2443 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002444
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002445 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002446 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002447 u32 priority = (skb_peek(&chan->data_q))->priority;
2448 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002449 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002450 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002451
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002452 /* Stop if priority has changed */
2453 if (skb->priority < priority)
2454 break;
2455
2456 skb = skb_dequeue(&chan->data_q);
2457
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002458 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002459 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002460
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 hci_send_frame(skb);
2462 hdev->acl_last_tx = jiffies;
2463
2464 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002465 chan->sent++;
2466 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 }
2468 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002469
2470 if (cnt != hdev->acl_cnt)
2471 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472}
2473
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002474static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002475{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002476 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002477 struct hci_chan *chan;
2478 struct sk_buff *skb;
2479 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002480 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002481
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002482 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002483
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002484 BT_DBG("%s", hdev->name);
2485
2486 if (hdev->dev_type == HCI_AMP)
2487 type = AMP_LINK;
2488 else
2489 type = ACL_LINK;
2490
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002491 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002492 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002493 u32 priority = (skb_peek(&chan->data_q))->priority;
2494 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2495 int blocks;
2496
2497 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002498 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002499
2500 /* Stop if priority has changed */
2501 if (skb->priority < priority)
2502 break;
2503
2504 skb = skb_dequeue(&chan->data_q);
2505
2506 blocks = __get_blocks(hdev, skb);
2507 if (blocks > hdev->block_cnt)
2508 return;
2509
2510 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002511 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002512
2513 hci_send_frame(skb);
2514 hdev->acl_last_tx = jiffies;
2515
2516 hdev->block_cnt -= blocks;
2517 quote -= blocks;
2518
2519 chan->sent += blocks;
2520 chan->conn->sent += blocks;
2521 }
2522 }
2523
2524 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002525 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002526}
2527
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002528static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002529{
2530 BT_DBG("%s", hdev->name);
2531
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002532 /* No ACL link over BR/EDR controller */
2533 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2534 return;
2535
2536 /* No AMP link over AMP controller */
2537 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002538 return;
2539
2540 switch (hdev->flow_ctl_mode) {
2541 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2542 hci_sched_acl_pkt(hdev);
2543 break;
2544
2545 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2546 hci_sched_acl_blk(hdev);
2547 break;
2548 }
2549}
2550
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002552static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553{
2554 struct hci_conn *conn;
2555 struct sk_buff *skb;
2556 int quote;
2557
2558 BT_DBG("%s", hdev->name);
2559
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002560 if (!hci_conn_num(hdev, SCO_LINK))
2561 return;
2562
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2564 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2565 BT_DBG("skb %p len %d", skb, skb->len);
2566 hci_send_frame(skb);
2567
2568 conn->sent++;
2569 if (conn->sent == ~0)
2570 conn->sent = 0;
2571 }
2572 }
2573}
2574
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002575static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002576{
2577 struct hci_conn *conn;
2578 struct sk_buff *skb;
2579 int quote;
2580
2581 BT_DBG("%s", hdev->name);
2582
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002583 if (!hci_conn_num(hdev, ESCO_LINK))
2584 return;
2585
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002586 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2587 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002588 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2589 BT_DBG("skb %p len %d", skb, skb->len);
2590 hci_send_frame(skb);
2591
2592 conn->sent++;
2593 if (conn->sent == ~0)
2594 conn->sent = 0;
2595 }
2596 }
2597}
2598
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002599static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002600{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002601 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002602 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002603 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002604
2605 BT_DBG("%s", hdev->name);
2606
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002607 if (!hci_conn_num(hdev, LE_LINK))
2608 return;
2609
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002610 if (!test_bit(HCI_RAW, &hdev->flags)) {
2611 /* LE tx timeout must be longer than maximum
2612 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03002613 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002614 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002615 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002616 }
2617
2618 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002619 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002620 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002621 u32 priority = (skb_peek(&chan->data_q))->priority;
2622 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002623 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002624 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002625
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002626 /* Stop if priority has changed */
2627 if (skb->priority < priority)
2628 break;
2629
2630 skb = skb_dequeue(&chan->data_q);
2631
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002632 hci_send_frame(skb);
2633 hdev->le_last_tx = jiffies;
2634
2635 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002636 chan->sent++;
2637 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002638 }
2639 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002640
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002641 if (hdev->le_pkts)
2642 hdev->le_cnt = cnt;
2643 else
2644 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002645
2646 if (cnt != tmp)
2647 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002648}
2649
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002650static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002652 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 struct sk_buff *skb;
2654
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002655 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002656 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657
2658 /* Schedule queues and send stuff to HCI driver */
2659
2660 hci_sched_acl(hdev);
2661
2662 hci_sched_sco(hdev);
2663
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002664 hci_sched_esco(hdev);
2665
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002666 hci_sched_le(hdev);
2667
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668 /* Send next queued raw (unknown type) packet */
2669 while ((skb = skb_dequeue(&hdev->raw_q)))
2670 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671}
2672
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002673/* ---- HCI RX task (incoming data processing) ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674
2675/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002676static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677{
2678 struct hci_acl_hdr *hdr = (void *) skb->data;
2679 struct hci_conn *conn;
2680 __u16 handle, flags;
2681
2682 skb_pull(skb, HCI_ACL_HDR_SIZE);
2683
2684 handle = __le16_to_cpu(hdr->handle);
2685 flags = hci_flags(handle);
2686 handle = hci_handle(handle);
2687
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002688 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002689 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690
2691 hdev->stat.acl_rx++;
2692
2693 hci_dev_lock(hdev);
2694 conn = hci_conn_hash_lookup_handle(hdev, handle);
2695 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002696
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002698 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002699
Johan Hedberg671267b2012-05-12 16:11:50 -03002700 hci_dev_lock(hdev);
2701 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2702 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2703 mgmt_device_connected(hdev, &conn->dst, conn->type,
2704 conn->dst_type, 0, NULL, 0,
2705 conn->dev_class);
2706 hci_dev_unlock(hdev);
2707
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002709 l2cap_recv_acldata(conn, skb, flags);
2710 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002712 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002713 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 }
2715
2716 kfree_skb(skb);
2717}
2718
2719/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03002720static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721{
2722 struct hci_sco_hdr *hdr = (void *) skb->data;
2723 struct hci_conn *conn;
2724 __u16 handle;
2725
2726 skb_pull(skb, HCI_SCO_HDR_SIZE);
2727
2728 handle = __le16_to_cpu(hdr->handle);
2729
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002730 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731
2732 hdev->stat.sco_rx++;
2733
2734 hci_dev_lock(hdev);
2735 conn = hci_conn_hash_lookup_handle(hdev, handle);
2736 hci_dev_unlock(hdev);
2737
2738 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002740 sco_recv_scodata(conn, skb);
2741 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002743 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002744 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 }
2746
2747 kfree_skb(skb);
2748}
2749
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002750static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002752 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 struct sk_buff *skb;
2754
2755 BT_DBG("%s", hdev->name);
2756
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002758 /* Send copy to monitor */
2759 hci_send_to_monitor(hdev, skb);
2760
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 if (atomic_read(&hdev->promisc)) {
2762 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002763 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 }
2765
2766 if (test_bit(HCI_RAW, &hdev->flags)) {
2767 kfree_skb(skb);
2768 continue;
2769 }
2770
2771 if (test_bit(HCI_INIT, &hdev->flags)) {
2772 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002773 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 case HCI_ACLDATA_PKT:
2775 case HCI_SCODATA_PKT:
2776 kfree_skb(skb);
2777 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002778 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 }
2780
2781 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002782 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002784 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 hci_event_packet(hdev, skb);
2786 break;
2787
2788 case HCI_ACLDATA_PKT:
2789 BT_DBG("%s ACL data packet", hdev->name);
2790 hci_acldata_packet(hdev, skb);
2791 break;
2792
2793 case HCI_SCODATA_PKT:
2794 BT_DBG("%s SCO data packet", hdev->name);
2795 hci_scodata_packet(hdev, skb);
2796 break;
2797
2798 default:
2799 kfree_skb(skb);
2800 break;
2801 }
2802 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803}
2804
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002805static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002807 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 struct sk_buff *skb;
2809
Andrei Emeltchenko21047862012-07-10 15:27:47 +03002810 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2811 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002814 if (atomic_read(&hdev->cmd_cnt)) {
2815 skb = skb_dequeue(&hdev->cmd_q);
2816 if (!skb)
2817 return;
2818
Wei Yongjun7585b972009-02-25 18:29:52 +08002819 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002821 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2822 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 atomic_dec(&hdev->cmd_cnt);
2824 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002825 if (test_bit(HCI_RESET, &hdev->flags))
2826 del_timer(&hdev->cmd_timer);
2827 else
2828 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002829 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 } else {
2831 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002832 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 }
2834 }
2835}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002836
2837int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2838{
2839 /* General inquiry access code (GIAC) */
2840 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2841 struct hci_cp_inquiry cp;
2842
2843 BT_DBG("%s", hdev->name);
2844
2845 if (test_bit(HCI_INQUIRY, &hdev->flags))
2846 return -EINPROGRESS;
2847
Johan Hedberg46632622012-01-02 16:06:08 +02002848 inquiry_cache_flush(hdev);
2849
Andre Guedes2519a1f2011-11-07 11:45:24 -03002850 memset(&cp, 0, sizeof(cp));
2851 memcpy(&cp.lap, lap, sizeof(cp.lap));
2852 cp.length = length;
2853
2854 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2855}
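/*
 * Note: the lap[] initializer in hci_do_inquiry() is the General
 * Inquiry Access Code 0x9E8B33 in little-endian byte order, i.e.
 * "discover all nearby devices".
 */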
Andre Guedes023d50492011-11-04 14:16:52 -03002856
2857int hci_cancel_inquiry(struct hci_dev *hdev)
2858{
2859 BT_DBG("%s", hdev->name);
2860
2861 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002862 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002863
2864 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2865}
Andre Guedes31f79562012-04-24 21:02:53 -03002866
2867u8 bdaddr_to_le(u8 bdaddr_type)
2868{
2869 switch (bdaddr_type) {
2870 case BDADDR_LE_PUBLIC:
2871 return ADDR_LE_DEV_PUBLIC;
2872
2873 default:
2874 /* Fallback to LE Random address type */
2875 return ADDR_LE_DEV_RANDOM;
2876 }
2877}