blob: 1d3c8920f497bae1d722712b061de325ccc30b25 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070032#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h>
36
Marcel Holtmannb78752c2010-08-08 23:06:53 -040037static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020038static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020039static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070040
Linus Torvalds1da177e2005-04-16 15:20:36 -070041/* HCI device list */
42LIST_HEAD(hci_dev_list);
43DEFINE_RWLOCK(hci_dev_list_lock);
44
45/* HCI callback list */
46LIST_HEAD(hci_cb_list);
47DEFINE_RWLOCK(hci_cb_list_lock);
48
Sasha Levin3df92b32012-05-27 22:36:56 +020049/* HCI ID Numbering */
50static DEFINE_IDA(hci_index_ida);
51
Linus Torvalds1da177e2005-04-16 15:20:36 -070052/* ---- HCI notifications ---- */
53
/* Forward a device state event (register/unregister/up/down) to the HCI
 * socket layer so monitoring sockets see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070059/* ---- HCI debugfs entries ---- */
60
Marcel Holtmanndfb826a2013-10-18 12:04:46 -070061static int features_show(struct seq_file *f, void *ptr)
62{
63 struct hci_dev *hdev = f->private;
64 u8 p;
65
66 hci_dev_lock(hdev);
67 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
68 seq_printf(f, "Page %u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
69 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
70 hdev->features[p][0], hdev->features[p][1],
71 hdev->features[p][2], hdev->features[p][3],
72 hdev->features[p][4], hdev->features[p][5],
73 hdev->features[p][6], hdev->features[p][7]);
74 }
75 hci_dev_unlock(hdev);
76
77 return 0;
78}
79
/* debugfs open hook: bind features_show to this device (i_private). */
static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

/* Read-only seq_file operations for the "features" debugfs entry. */
static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
91
Marcel Holtmann70afe0b2013-10-17 17:24:14 -070092static int blacklist_show(struct seq_file *f, void *p)
93{
94 struct hci_dev *hdev = f->private;
95 struct bdaddr_list *b;
96
97 hci_dev_lock(hdev);
98 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -070099 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700100 hci_dev_unlock(hdev);
101
102 return 0;
103}
104
/* debugfs open hook: bind blacklist_show to this device (i_private). */
static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

/* Read-only seq_file operations for the "blacklist" debugfs entry. */
static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
116
Marcel Holtmann47219832013-10-17 17:24:15 -0700117static int uuids_show(struct seq_file *f, void *p)
118{
119 struct hci_dev *hdev = f->private;
120 struct bt_uuid *uuid;
121
122 hci_dev_lock(hdev);
123 list_for_each_entry(uuid, &hdev->uuids, list) {
124 u32 data0, data5;
125 u16 data1, data2, data3, data4;
126
127 data5 = get_unaligned_le32(uuid);
128 data4 = get_unaligned_le16(uuid + 4);
129 data3 = get_unaligned_le16(uuid + 6);
130 data2 = get_unaligned_le16(uuid + 8);
131 data1 = get_unaligned_le16(uuid + 10);
132 data0 = get_unaligned_le32(uuid + 12);
133
134 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
135 data0, data1, data2, data3, data4, data5);
136 }
137 hci_dev_unlock(hdev);
138
139 return 0;
140}
141
/* debugfs open hook: bind uuids_show to this device (i_private). */
static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

/* Read-only seq_file operations for the "uuids" debugfs entry. */
static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
153
/* debugfs: dump every entry in the inquiry (discovery) cache.
 * Per line: bdaddr, page-scan repetition/period/mode, class of device
 * (printed MSB first), clock offset, RSSI, SSP mode and timestamp.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}
177
/* debugfs open hook: bind inquiry_cache_show to this device. */
static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

/* Read-only seq_file operations for the "inquiry_cache" debugfs entry. */
static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
189
/* debugfs attribute getter: current HCI voice setting (read-only). */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* No setter: the value is controller-managed; printed as 16-bit hex. */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
203
/* debugfs attribute setter: auto-accept delay (no range checking). */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs attribute getter: current auto-accept delay. */
static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
228
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700229static int idle_timeout_set(void *data, u64 val)
230{
231 struct hci_dev *hdev = data;
232
233 if (val != 0 && (val < 500 || val > 3600000))
234 return -EINVAL;
235
236 hci_dev_lock(hdev);
237 hdev->idle_timeout= val;
238 hci_dev_unlock(hdev);
239
240 return 0;
241}
242
243static int idle_timeout_get(void *data, u64 *val)
244{
245 struct hci_dev *hdev = data;
246
247 hci_dev_lock(hdev);
248 *val = hdev->idle_timeout;
249 hci_dev_unlock(hdev);
250
251 return 0;
252}
253
254DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
255 idle_timeout_set, "%llu\n");
256
257static int sniff_min_interval_set(void *data, u64 val)
258{
259 struct hci_dev *hdev = data;
260
261 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
262 return -EINVAL;
263
264 hci_dev_lock(hdev);
265 hdev->sniff_min_interval= val;
266 hci_dev_unlock(hdev);
267
268 return 0;
269}
270
271static int sniff_min_interval_get(void *data, u64 *val)
272{
273 struct hci_dev *hdev = data;
274
275 hci_dev_lock(hdev);
276 *val = hdev->sniff_min_interval;
277 hci_dev_unlock(hdev);
278
279 return 0;
280}
281
282DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
283 sniff_min_interval_set, "%llu\n");
284
285static int sniff_max_interval_set(void *data, u64 val)
286{
287 struct hci_dev *hdev = data;
288
289 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
290 return -EINVAL;
291
292 hci_dev_lock(hdev);
293 hdev->sniff_max_interval= val;
294 hci_dev_unlock(hdev);
295
296 return 0;
297}
298
299static int sniff_max_interval_get(void *data, u64 *val)
300{
301 struct hci_dev *hdev = data;
302
303 hci_dev_lock(hdev);
304 *val = hdev->sniff_max_interval;
305 hci_dev_unlock(hdev);
306
307 return 0;
308}
309
310DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
311 sniff_max_interval_set, "%llu\n");
312
/* debugfs: show the configured LE static random address. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs open hook: bind static_address_show to this device. */
static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

/* Read-only seq_file operations for the "static_address" debugfs entry. */
static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
335
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336/* ---- HCI requests ---- */
337
/* Completion callback for synchronous requests: record the result and
 * wake the waiter, but only if a request is actually pending (a late
 * completion after timeout/cancel must not clobber state).
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
348
/* Abort a pending synchronous request with the given errno and wake
 * the waiter; a no-op when nothing is pending.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
359
/* Take ownership of the last received event (hdev->recv_evt) and return
 * it if it matches what the synchronous caller waited for.
 *
 * If @event is non-zero, any event of that type is accepted.  Otherwise
 * the event must be a Command Complete whose opcode matches @opcode.
 * On any mismatch or malformed event the skb is freed and
 * ERR_PTR(-ENODATA) is returned.  The returned skb has its headers
 * pulled; the caller owns (and must free) it.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach recv_evt under the lock so the RX path cannot race us. */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
414
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300415struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300416 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300417{
418 DECLARE_WAITQUEUE(wait, current);
419 struct hci_request req;
420 int err = 0;
421
422 BT_DBG("%s", hdev->name);
423
424 hci_req_init(&req, hdev);
425
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300426 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300427
428 hdev->req_status = HCI_REQ_PEND;
429
430 err = hci_req_run(&req, hci_req_sync_complete);
431 if (err < 0)
432 return ERR_PTR(err);
433
434 add_wait_queue(&hdev->req_wait_q, &wait);
435 set_current_state(TASK_INTERRUPTIBLE);
436
437 schedule_timeout(timeout);
438
439 remove_wait_queue(&hdev->req_wait_q, &wait);
440
441 if (signal_pending(current))
442 return ERR_PTR(-EINTR);
443
444 switch (hdev->req_status) {
445 case HCI_REQ_DONE:
446 err = -bt_to_errno(hdev->req_result);
447 break;
448
449 case HCI_REQ_CANCELED:
450 err = -hdev->req_result;
451 break;
452
453 default:
454 err = -ETIMEDOUT;
455 break;
456 }
457
458 hdev->req_status = hdev->req_result = 0;
459
460 BT_DBG("%s end: err %d", hdev->name, err);
461
462 if (err < 0)
463 return ERR_PTR(err);
464
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300465 return hci_get_cmd_complete(hdev, opcode, event);
466}
467EXPORT_SYMBOL(__hci_cmd_sync_ev);
468
/* Convenience wrapper: synchronous command that waits for the standard
 * Command Complete event (event == 0 selects opcode matching).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
475
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200477static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200478 void (*func)(struct hci_request *req,
479 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200480 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200482 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483 DECLARE_WAITQUEUE(wait, current);
484 int err = 0;
485
486 BT_DBG("%s start", hdev->name);
487
Johan Hedberg42c6b122013-03-05 20:37:49 +0200488 hci_req_init(&req, hdev);
489
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490 hdev->req_status = HCI_REQ_PEND;
491
Johan Hedberg42c6b122013-03-05 20:37:49 +0200492 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200493
Johan Hedberg42c6b122013-03-05 20:37:49 +0200494 err = hci_req_run(&req, hci_req_sync_complete);
495 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200496 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300497
498 /* ENODATA means the HCI request command queue is empty.
499 * This can happen when a request with conditionals doesn't
500 * trigger any commands to be sent. This is normal behavior
501 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200502 */
Andre Guedes920c8302013-03-08 11:20:15 -0300503 if (err == -ENODATA)
504 return 0;
505
506 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200507 }
508
Andre Guedesbc4445c2013-03-08 11:20:13 -0300509 add_wait_queue(&hdev->req_wait_q, &wait);
510 set_current_state(TASK_INTERRUPTIBLE);
511
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512 schedule_timeout(timeout);
513
514 remove_wait_queue(&hdev->req_wait_q, &wait);
515
516 if (signal_pending(current))
517 return -EINTR;
518
519 switch (hdev->req_status) {
520 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700521 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522 break;
523
524 case HCI_REQ_CANCELED:
525 err = -hdev->req_result;
526 break;
527
528 default:
529 err = -ETIMEDOUT;
530 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700531 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532
Johan Hedberga5040ef2011-01-10 13:28:59 +0200533 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534
535 BT_DBG("%s end: err %d", hdev->name, err);
536
537 return err;
538}
539
/* Public entry for synchronous requests: refuses when the device is
 * down and takes the request lock so requests are serialized.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
557
Johan Hedberg42c6b122013-03-05 20:37:49 +0200558static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200560 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561
562 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200563 set_bit(HCI_RESET, &req->hdev->flags);
564 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565}
566
/* Stage-1 init for BR/EDR controllers: packet-based flow control and
 * the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
580
/* Stage-1 init for AMP controllers: block-based flow control and the
 * AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
606
/* Init stage 1: optional reset, then transport-specific setup depending
 * on whether this is a BR/EDR or an AMP controller.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
631
/* Stage-2 setup common to all BR/EDR-capable controllers: buffer and
 * identity reads, event-filter reset and accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
673
/* Stage-2 setup for LE-capable controllers: LE capability reads, plus
 * implicit LE enablement for single-mode (non-BR/EDR) devices.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
697
698static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
699{
700 if (lmp_ext_inq_capable(hdev))
701 return 0x02;
702
703 if (lmp_inq_rssi_capable(hdev))
704 return 0x01;
705
706 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
707 hdev->lmp_subver == 0x0757)
708 return 0x01;
709
710 if (hdev->manufacturer == 15) {
711 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
712 return 0x01;
713 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
714 return 0x01;
715 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
716 return 0x01;
717 }
718
719 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
720 hdev->lmp_subver == 0x1805)
721 return 0x01;
722
723 return 0x00;
724}
725
Johan Hedberg42c6b122013-03-05 20:37:49 +0200726static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200727{
728 u8 mode;
729
Johan Hedberg42c6b122013-03-05 20:37:49 +0200730 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200731
Johan Hedberg42c6b122013-03-05 20:37:49 +0200732 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200733}
734
/* Build the Set Event Mask (and, when LE-capable, LE Set Event Mask)
 * commands, enabling exactly the events the controller's feature bits
 * say it can generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* Only the low five LE meta-event bits are enabled here. */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
815
/* Init stage 2: transport setup (BR/EDR and/or LE), event mask, and
 * feature-conditional configuration (SSP, inquiry mode, extended
 * features, link-level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data on both
			 * host and controller.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
879
Johan Hedberg42c6b122013-03-05 20:37:49 +0200880static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200881{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200882 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200883 struct hci_cp_write_def_link_policy cp;
884 u16 link_policy = 0;
885
886 if (lmp_rswitch_capable(hdev))
887 link_policy |= HCI_LP_RSWITCH;
888 if (lmp_hold_capable(hdev))
889 link_policy |= HCI_LP_HOLD;
890 if (lmp_sniff_capable(hdev))
891 link_policy |= HCI_LP_SNIFF;
892 if (lmp_park_capable(hdev))
893 link_policy |= HCI_LP_PARK;
894
895 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200896 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200897}
898
Johan Hedberg42c6b122013-03-05 20:37:49 +0200899static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200900{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200901 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200902 struct hci_cp_write_le_host_supported cp;
903
Johan Hedbergc73eee92013-04-19 18:35:21 +0300904 /* LE-only devices do not support explicit enablement */
905 if (!lmp_bredr_capable(hdev))
906 return;
907
Johan Hedberg2177bab2013-03-05 20:37:43 +0200908 memset(&cp, 0, sizeof(cp));
909
910 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
911 cp.le = 0x01;
912 cp.simul = lmp_le_br_capable(hdev);
913 }
914
915 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200916 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
917 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200918}
919
/* Build and queue event mask page 2 based on the Connectionless Slave
 * Broadcast (CSB) feature bits in features page 2. All events start
 * masked out and are enabled per supported role.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
947
/* Third stage of controller initialization: commands that depend on
 * results gathered during stage two (supported commands bitmask,
 * max_page). Runs as part of an hci_request; @opt is unused.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		/* Wipe all stored link keys on the controller */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy is only sent if listed as supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
986
Johan Hedberg5d4e7e82013-09-13 11:40:01 +0300987static void hci_init4_req(struct hci_request *req, unsigned long opt)
988{
989 struct hci_dev *hdev = req->hdev;
990
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300991 /* Set event mask page 2 if the HCI command for it is supported */
992 if (hdev->commands[22] & 0x04)
993 hci_set_event_mask_page_2(req);
994
Johan Hedberg5d4e7e82013-09-13 11:40:01 +0300995 /* Check for Synchronization Train support */
996 if (hdev->features[2][0] & 0x04)
997 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
998}
999
/* Run the staged HCI initialization sequence synchronously and, on the
 * very first setup of a device, create its debugfs entries.
 *
 * Returns 0 on success or the negative error from the failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	/* Stage 1 applies to all controller types */
	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to all BR/EDR-capable controllers */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}
1069
Johan Hedberg42c6b122013-03-05 20:37:49 +02001070static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071{
1072 __u8 scan = opt;
1073
Johan Hedberg42c6b122013-03-05 20:37:49 +02001074 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075
1076 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001077 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078}
1079
Johan Hedberg42c6b122013-03-05 20:37:49 +02001080static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081{
1082 __u8 auth = opt;
1083
Johan Hedberg42c6b122013-03-05 20:37:49 +02001084 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085
1086 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001087 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088}
1089
Johan Hedberg42c6b122013-03-05 20:37:49 +02001090static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091{
1092 __u8 encrypt = opt;
1093
Johan Hedberg42c6b122013-03-05 20:37:49 +02001094 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001096 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001097 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098}
1099
Johan Hedberg42c6b122013-03-05 20:37:49 +02001100static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001101{
1102 __le16 policy = cpu_to_le16(opt);
1103
Johan Hedberg42c6b122013-03-05 20:37:49 +02001104 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001105
1106 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001107 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001108}
1109
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001110/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111 * Device is held on return. */
1112struct hci_dev *hci_dev_get(int index)
1113{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001114 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115
1116 BT_DBG("%d", index);
1117
1118 if (index < 0)
1119 return NULL;
1120
1121 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001122 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123 if (d->id == index) {
1124 hdev = hci_dev_hold(d);
1125 break;
1126 }
1127 }
1128 read_unlock(&hci_dev_list_lock);
1129 return hdev;
1130}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131
1132/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001133
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001134bool hci_discovery_active(struct hci_dev *hdev)
1135{
1136 struct discovery_state *discov = &hdev->discovery;
1137
Andre Guedes6fbe1952012-02-03 17:47:58 -03001138 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001139 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001140 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001141 return true;
1142
Andre Guedes6fbe1952012-02-03 17:47:58 -03001143 default:
1144 return false;
1145 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001146}
1147
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events on the edges that user space cares about.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op transitions generate no events */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* A STARTING -> STOPPED transition means discovery never
		 * actually began, so no "stopped" event is sent.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1173
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001174void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175{
Johan Hedberg30883512012-01-04 14:16:21 +02001176 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001177 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178
Johan Hedberg561aafb2012-01-04 13:31:59 +02001179 list_for_each_entry_safe(p, n, &cache->all, all) {
1180 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001181 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001183
1184 INIT_LIST_HEAD(&cache->unknown);
1185 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186}
1187
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001188struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1189 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190{
Johan Hedberg30883512012-01-04 14:16:21 +02001191 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 struct inquiry_entry *e;
1193
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001194 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195
Johan Hedberg561aafb2012-01-04 13:31:59 +02001196 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001198 return e;
1199 }
1200
1201 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202}
1203
Johan Hedberg561aafb2012-01-04 13:31:59 +02001204struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001205 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001206{
Johan Hedberg30883512012-01-04 14:16:21 +02001207 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001208 struct inquiry_entry *e;
1209
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001210 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001211
1212 list_for_each_entry(e, &cache->unknown, list) {
1213 if (!bacmp(&e->data.bdaddr, bdaddr))
1214 return e;
1215 }
1216
1217 return NULL;
1218}
1219
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001220struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001221 bdaddr_t *bdaddr,
1222 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001223{
1224 struct discovery_state *cache = &hdev->discovery;
1225 struct inquiry_entry *e;
1226
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001227 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001228
1229 list_for_each_entry(e, &cache->resolve, list) {
1230 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1231 return e;
1232 if (!bacmp(&e->data.bdaddr, bdaddr))
1233 return e;
1234 }
1235
1236 return NULL;
1237}
1238
/* Re-insert @ie into the resolve list, keeping the list ordered so
 * that entries with stronger signal (smaller abs(rssi)) come first;
 * entries already in NAME_PENDING state are never displaced.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Remove from the current position before re-inserting */
	list_del(&ie->list);

	/* Walk until we hit a non-pending entry with weaker signal;
	 * @pos tracks the node to insert after.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1257
/* Add or refresh an inquiry cache entry for the device described by
 * @data. @name_known indicates whether the remote name is already
 * resolved; if @ssp is non-NULL it is set to the remote's SSP mode.
 *
 * Returns false when the entry could not be allocated or the remote
 * name is still unknown (i.e. name resolution is outstanding), true
 * otherwise.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Fresh inquiry data invalidates previously stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Keep reporting SSP support once it has been seen */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* An RSSI change can affect name-resolution ordering */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the pending lists */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1315
1316static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1317{
Johan Hedberg30883512012-01-04 14:16:21 +02001318 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 struct inquiry_info *info = (struct inquiry_info *) buf;
1320 struct inquiry_entry *e;
1321 int copied = 0;
1322
Johan Hedberg561aafb2012-01-04 13:31:59 +02001323 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001325
1326 if (copied >= num)
1327 break;
1328
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 bacpy(&info->bdaddr, &data->bdaddr);
1330 info->pscan_rep_mode = data->pscan_rep_mode;
1331 info->pscan_period_mode = data->pscan_period_mode;
1332 info->pscan_mode = data->pscan_mode;
1333 memcpy(info->dev_class, data->dev_class, 3);
1334 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001335
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001337 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 }
1339
1340 BT_DBG("cache %p, copied %d", cache, copied);
1341 return copied;
1342}
1343
Johan Hedberg42c6b122013-03-05 20:37:49 +02001344static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345{
1346 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001347 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 struct hci_cp_inquiry cp;
1349
1350 BT_DBG("%s", hdev->name);
1351
1352 if (test_bit(HCI_INQUIRY, &hdev->flags))
1353 return;
1354
1355 /* Start Inquiry */
1356 memcpy(&cp.lap, &ir->lap, 3);
1357 cp.length = ir->length;
1358 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001359 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360}
1361
/* Bit-wait action for wait_on_bit(): yield the CPU and report whether
 * a signal is pending (non-zero tells wait_on_bit to abort the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1367
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368int hci_inquiry(void __user *arg)
1369{
1370 __u8 __user *ptr = arg;
1371 struct hci_inquiry_req ir;
1372 struct hci_dev *hdev;
1373 int err = 0, do_inquiry = 0, max_rsp;
1374 long timeo;
1375 __u8 *buf;
1376
1377 if (copy_from_user(&ir, ptr, sizeof(ir)))
1378 return -EFAULT;
1379
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001380 hdev = hci_dev_get(ir.dev_id);
1381 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 return -ENODEV;
1383
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001384 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1385 err = -EBUSY;
1386 goto done;
1387 }
1388
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001389 if (hdev->dev_type != HCI_BREDR) {
1390 err = -EOPNOTSUPP;
1391 goto done;
1392 }
1393
Johan Hedberg56f87902013-10-02 13:43:13 +03001394 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1395 err = -EOPNOTSUPP;
1396 goto done;
1397 }
1398
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001399 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001400 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001401 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001402 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 do_inquiry = 1;
1404 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001405 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406
Marcel Holtmann04837f62006-07-03 10:02:33 +02001407 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001408
1409 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001410 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1411 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001412 if (err < 0)
1413 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001414
1415 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1416 * cleared). If it is interrupted by a signal, return -EINTR.
1417 */
1418 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1419 TASK_INTERRUPTIBLE))
1420 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001421 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001423 /* for unlimited number of responses we will use buffer with
1424 * 255 entries
1425 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1427
1428 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1429 * copy it to the user space.
1430 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001431 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001432 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 err = -ENOMEM;
1434 goto done;
1435 }
1436
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001437 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001439 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440
1441 BT_DBG("num_rsp %d", ir.num_rsp);
1442
1443 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1444 ptr += sizeof(ir);
1445 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001446 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001448 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 err = -EFAULT;
1450
1451 kfree(buf);
1452
1453done:
1454 hci_dev_put(hdev);
1455 return err;
1456}
1457
/* Core device power-on path: validate preconditions (not unregistered,
 * not rfkilled, has a usable address, not already up), open the
 * transport, run vendor setup and the staged HCI init, and either mark
 * the device up or fully unwind on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the initial setup phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and user-channel devices skip the staged
		 * HCI initialization sequence.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1559
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001560/* ---- HCI ioctl helpers ---- */
1561
/* ioctl entry point for powering on device @dev: resolve the device,
 * make sure no conflicting power work is pending, then run the actual
 * open sequence. Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
1591
/* Common power-down path used by the close ioctl, rfkill and the
 * delayed power-off work. The teardown order matters: pending work is
 * flushed before queues are purged, the reset (if quirked) is sent
 * before the cmd work is flushed, and the driver close() callback runs
 * only after all queues are empty and nothing is scheduled.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and drop both
	 * discoverable mode flags.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* Drop cached inquiry results and tear down all connections */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop last received event (kfree_skb handles NULL itself) */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Only notify mgmt when this was not an automatic power-off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1689
1690int hci_dev_close(__u16 dev)
1691{
1692 struct hci_dev *hdev;
1693 int err;
1694
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001695 hdev = hci_dev_get(dev);
1696 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001698
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001699 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1700 err = -EBUSY;
1701 goto done;
1702 }
1703
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001704 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1705 cancel_delayed_work(&hdev->power_off);
1706
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001708
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001709done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 hci_dev_put(hdev);
1711 return err;
1712}
1713
/* Reset an UP device on behalf of userspace (HCIDEVRESET ioctl):
 * purge queues, flush cached state, restore flow-control credits and,
 * unless the device is in raw mode, issue an HCI_Reset to the
 * controller. Does not cycle the transport like close/open would.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize with other synchronous HCI requests */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* Exclusively owned by a user channel socket */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Drop cached inquiry results and all connection state */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset command and per-link-type data credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1758
1759int hci_dev_reset_stat(__u16 dev)
1760{
1761 struct hci_dev *hdev;
1762 int ret = 0;
1763
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001764 hdev = hci_dev_get(dev);
1765 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 return -ENODEV;
1767
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001768 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1769 ret = -EBUSY;
1770 goto done;
1771 }
1772
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1774
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001775done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 return ret;
1778}
1779
/* Handle the HCISET* family of ioctls that tweak BR/EDR controller
 * settings. arg points to a struct hci_dev_req in userspace whose
 * dev_opt field carries the command-specific value.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Exclusively owned by a user channel socket */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These settings only apply to BR/EDR controllers ... */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* ... and only while BR/EDR is enabled on them */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	/* The remaining commands only update host-side state and need
	 * no request to the controller.
	 */
	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: half [1] is the MTU,
		 * half [0] is the packet count.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1870
/* HCIGETDEVLIST ioctl helper: copy a snapshot of (dev_id, flags)
 * pairs for up to dev_num registered controllers to userspace.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request to two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace is looking: drop the auto-off flag and
		 * cancel the pending power-off work.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed through mgmt default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1917
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for the
 * device whose id userspace passed in and copy it back.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace is looking: drop auto-off and wait for a pending
	 * power-off work to finish (_sync, unlike the list ioctl).
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed through mgmt default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus (low nibble) and device type (next two bits) */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields; SCO is meaningless there.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1966
1967/* ---- Interface to HCI drivers ---- */
1968
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001969static int hci_rfkill_set_block(void *data, bool blocked)
1970{
1971 struct hci_dev *hdev = data;
1972
1973 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1974
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001975 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1976 return -EBUSY;
1977
Johan Hedberg5e130362013-09-13 08:58:17 +03001978 if (blocked) {
1979 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001980 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1981 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001982 } else {
1983 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001984 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001985
1986 return 0;
1987}
1988
/* rfkill callbacks for HCI controllers; only set_block is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1992
/* Work handler behind hdev->power_on: open the device and re-check
 * the error conditions that the setup phase deliberately ignored.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Nobody claimed the device yet: schedule the
		 * automatic power-off.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2024
/* Delayed-work handler behind hdev->power_off: performs the actual
 * power down by running the common close path.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2034
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002035static void hci_discov_off(struct work_struct *work)
2036{
2037 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002038
2039 hdev = container_of(work, struct hci_dev, discov_off.work);
2040
2041 BT_DBG("%s", hdev->name);
2042
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002043 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002044}
2045
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002046int hci_uuids_clear(struct hci_dev *hdev)
2047{
Johan Hedberg48210022013-01-27 00:31:28 +02002048 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002049
Johan Hedberg48210022013-01-27 00:31:28 +02002050 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2051 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002052 kfree(uuid);
2053 }
2054
2055 return 0;
2056}
2057
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002058int hci_link_keys_clear(struct hci_dev *hdev)
2059{
2060 struct list_head *p, *n;
2061
2062 list_for_each_safe(p, n, &hdev->link_keys) {
2063 struct link_key *key;
2064
2065 key = list_entry(p, struct link_key, list);
2066
2067 list_del(p);
2068 kfree(key);
2069 }
2070
2071 return 0;
2072}
2073
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002074int hci_smp_ltks_clear(struct hci_dev *hdev)
2075{
2076 struct smp_ltk *k, *tmp;
2077
2078 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2079 list_del(&k->list);
2080 kfree(k);
2081 }
2082
2083 return 0;
2084}
2085
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002086struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2087{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002088 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002089
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002090 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002091 if (bacmp(bdaddr, &k->bdaddr) == 0)
2092 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002093
2094 return NULL;
2095}
2096
/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and the authentication
 * requirements both sides used during pairing.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2132
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002133struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002134{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002135 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002136
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002137 list_for_each_entry(k, &hdev->long_term_keys, list) {
2138 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002139 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002140 continue;
2141
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002142 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002143 }
2144
2145 return NULL;
2146}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002147
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002148struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002149 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002150{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002151 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002152
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002153 list_for_each_entry(k, &hdev->long_term_keys, list)
2154 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002155 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002156 return k;
2157
2158 return NULL;
2159}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002160
/* Store (or update) a BR/EDR link key and, for new keys, notify mgmt
 * and record on the connection whether the key should be flushed on
 * disconnect. Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2213
/* Store (or update) an SMP key (STK or LTK) for a remote address.
 * Other key types are silently ignored. New LTKs are reported to
 * mgmt. Returns 0 on success (or ignore) and -ENOMEM on allocation
 * failure.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Only short term and long term keys are stored here */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address/type if present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys are interesting to mgmt; STKs are not */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
2250
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002251int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2252{
2253 struct link_key *key;
2254
2255 key = hci_find_link_key(hdev, bdaddr);
2256 if (!key)
2257 return -ENOENT;
2258
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002259 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002260
2261 list_del(&key->list);
2262 kfree(key);
2263
2264 return 0;
2265}
2266
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002267int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2268{
2269 struct smp_ltk *k, *tmp;
2270
2271 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2272 if (bacmp(bdaddr, &k->bdaddr))
2273 continue;
2274
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002275 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002276
2277 list_del(&k->list);
2278 kfree(k);
2279 }
2280
2281 return 0;
2282}
2283
Ville Tervo6bd32322011-02-16 16:32:41 +02002284/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002285static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002286{
2287 struct hci_dev *hdev = (void *) arg;
2288
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002289 if (hdev->sent_cmd) {
2290 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2291 u16 opcode = __le16_to_cpu(sent->opcode);
2292
2293 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2294 } else {
2295 BT_ERR("%s command tx timeout", hdev->name);
2296 }
2297
Ville Tervo6bd32322011-02-16 16:32:41 +02002298 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002299 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002300}
2301
Szymon Janc2763eda2011-03-22 13:12:22 +01002302struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002303 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002304{
2305 struct oob_data *data;
2306
2307 list_for_each_entry(data, &hdev->remote_oob_data, list)
2308 if (bacmp(bdaddr, &data->bdaddr) == 0)
2309 return data;
2310
2311 return NULL;
2312}
2313
2314int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2315{
2316 struct oob_data *data;
2317
2318 data = hci_find_remote_oob_data(hdev, bdaddr);
2319 if (!data)
2320 return -ENOENT;
2321
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002322 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002323
2324 list_del(&data->list);
2325 kfree(data);
2326
2327 return 0;
2328}
2329
2330int hci_remote_oob_data_clear(struct hci_dev *hdev)
2331{
2332 struct oob_data *data, *n;
2333
2334 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2335 list_del(&data->list);
2336 kfree(data);
2337 }
2338
2339 return 0;
2340}
2341
2342int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002343 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002344{
2345 struct oob_data *data;
2346
2347 data = hci_find_remote_oob_data(hdev, bdaddr);
2348
2349 if (!data) {
2350 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2351 if (!data)
2352 return -ENOMEM;
2353
2354 bacpy(&data->bdaddr, bdaddr);
2355 list_add(&data->list, &hdev->remote_oob_data);
2356 }
2357
2358 memcpy(data->hash, hash, sizeof(data->hash));
2359 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2360
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002361 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002362
2363 return 0;
2364}
2365
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002366struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2367 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002368{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002369 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002370
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002371 list_for_each_entry(b, &hdev->blacklist, list) {
2372 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002373 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002374 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002375
2376 return NULL;
2377}
2378
2379int hci_blacklist_clear(struct hci_dev *hdev)
2380{
2381 struct list_head *p, *n;
2382
2383 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002384 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002385
2386 list_del(p);
2387 kfree(b);
2388 }
2389
2390 return 0;
2391}
2392
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002393int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002394{
2395 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002396
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002397 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002398 return -EBADF;
2399
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002400 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002401 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002402
2403 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002404 if (!entry)
2405 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002406
2407 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002408 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002409
2410 list_add(&entry->list, &hdev->blacklist);
2411
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002412 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002413}
2414
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002415int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002416{
2417 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002418
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002419 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002420 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002421
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002422 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002423 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002424 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002425
2426 list_del(&entry->list);
2427 kfree(entry);
2428
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002429 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002430}
2431
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002432static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002433{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002434 if (status) {
2435 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002436
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002437 hci_dev_lock(hdev);
2438 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2439 hci_dev_unlock(hdev);
2440 return;
2441 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002442}
2443
/* Request-complete callback for the LE-scan-disable request built in
 * le_scan_disable_work().
 *
 * @status: HCI status of the LE_SET_SCAN_ENABLE (disable) command.
 *
 * For LE-only discovery, disabling the scan ends discovery.  For
 * interleaved discovery, the LE phase is followed by a BR/EDR inquiry
 * using the general inquiry access code.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only: nothing left to do, discovery is over. */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Chain a classic inquiry after the LE phase. */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Flush stale results before starting the new inquiry. */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2486
/* Delayed work (hdev->le_scan_disable) that turns off an active LE
 * scan.  Builds a single LE_SET_SCAN_ENABLE(disable) request; the
 * follow-up handling happens in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2507
/* Alloc HCI device */
/* Allocate and pre-initialize a struct hci_dev: defaults, list heads,
 * work items, queues, command timer and sysfs/discovery state.  The
 * caller registers it with hci_register_dev() and releases it with
 * hci_free_dev().  Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline packet types; the real set is learned from the
	 * controller during initialization.
	 */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff interval defaults — presumably baseband slots (0.625 ms
	 * units); confirm against hci_conn users.
	 */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Arms hci_cmd_timeout() against unanswered HCI commands. */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2565
/* Free HCI device */
/* Drop the last device reference; the actual memory is released by the
 * device core through the release callback installed elsewhere.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2573
/* Register HCI device */
/* Allocate an index, create work queues, debugfs and sysfs entries,
 * register rfkill, link the device into the global list and schedule
 * the initial power-on.  Returns the assigned index (>= 0) or a
 * negative errno; on failure all acquired resources are released.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* Drivers must provide at least open/close callbacks. */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered (max_active = 1) high-priority queues: one for TX/RX
	 * processing, one for serialized HCI requests.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	/* NOTE(review): on device_add failure the debugfs dir created
	 * above is not removed on the error path — verify intentional.
	 */
	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		/* rfkill is optional: drop it silently on failure. */
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2671
/* Unregister HCI device */
/* Tear down a registered controller: unlink it from the global list,
 * close it, flush pending work, notify management, remove sysfs/
 * debugfs entries, free per-device data and finally release the index
 * and the reference taken by hci_register_dev().
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Flag unregistration first so concurrent paths can bail out. */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index: hdev may be gone after hci_dev_put(). */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Free all per-device key/data stores under the device lock. */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2732
/* Suspend HCI device */
/* Broadcast the suspend event to registered notifier consumers.
 * Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2740
/* Resume HCI device */
/* Broadcast the resume event to registered notifier consumers.
 * Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2748
Marcel Holtmann76bca882009-11-18 00:40:39 +01002749/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07002750int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01002751{
Marcel Holtmann76bca882009-11-18 00:40:39 +01002752 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002753 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002754 kfree_skb(skb);
2755 return -ENXIO;
2756 }
2757
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002758 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002759 bt_cb(skb)->incoming = 1;
2760
2761 /* Time stamp */
2762 __net_timestamp(skb);
2763
Marcel Holtmann76bca882009-11-18 00:40:39 +01002764 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002765 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002766
Marcel Holtmann76bca882009-11-18 00:40:39 +01002767 return 0;
2768}
2769EXPORT_SYMBOL(hci_recv_frame);
2770
/* Incrementally reassemble one HCI packet of @type from a byte stream.
 *
 * State lives in hdev->reassembly[index]: an skb whose control block
 * tracks how many bytes are still expected.  Bytes are copied from
 * @data until either @count is exhausted or the packet is complete, in
 * which case it is handed to hci_recv_frame().
 *
 * Returns the number of unconsumed input bytes (>= 0), -EILSEQ for an
 * invalid type/index, or -ENOMEM on allocation failure or when the
 * header announces a payload larger than the preallocated buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate a buffer sized for the
		 * worst case of this packet type and expect its header
		 * first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it and reject payloads exceeding the buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2878
Marcel Holtmannef222012007-07-11 06:42:04 +02002879int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2880{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302881 int rem = 0;
2882
Marcel Holtmannef222012007-07-11 06:42:04 +02002883 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2884 return -EILSEQ;
2885
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002886 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002887 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302888 if (rem < 0)
2889 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002890
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302891 data += (count - rem);
2892 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002893 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002894
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302895 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002896}
2897EXPORT_SYMBOL(hci_recv_fragment);
2898
Suraj Sumangala99811512010-07-14 13:02:19 +05302899#define STREAM_REASSEMBLY 0
2900
2901int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2902{
2903 int type;
2904 int rem = 0;
2905
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002906 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302907 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2908
2909 if (!skb) {
2910 struct { char type; } *pkt;
2911
2912 /* Start of the frame */
2913 pkt = data;
2914 type = pkt->type;
2915
2916 data++;
2917 count--;
2918 } else
2919 type = bt_cb(skb)->pkt_type;
2920
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002921 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002922 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302923 if (rem < 0)
2924 return rem;
2925
2926 data += (count - rem);
2927 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002928 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302929
2930 return rem;
2931}
2932EXPORT_SYMBOL(hci_recv_stream_fragment);
2933
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934/* ---- Interface to upper protocols ---- */
2935
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936int hci_register_cb(struct hci_cb *cb)
2937{
2938 BT_DBG("%p name %s", cb, cb->name);
2939
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002940 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002942 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943
2944 return 0;
2945}
2946EXPORT_SYMBOL(hci_register_cb);
2947
2948int hci_unregister_cb(struct hci_cb *cb)
2949{
2950 BT_DBG("%p name %s", cb, cb->name);
2951
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002952 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002954 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955
2956 return 0;
2957}
2958EXPORT_SYMBOL(hci_unregister_cb);
2959
/* Push one outgoing frame to the transport driver, after duplicating
 * it to the monitor channel and (in promiscuous mode) to raw sockets.
 * Send errors are only logged; the driver owns the skb afterwards.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
2981
Johan Hedberg3119ae92013-03-05 20:37:44 +02002982void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2983{
2984 skb_queue_head_init(&req->cmd_q);
2985 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002986 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002987}
2988
/* Submit a built request: tag the last queued command with @complete
 * and splice the whole sequence onto the device command queue under
 * its lock, then kick the command worker.
 * Returns 0 on success, the deferred build error (req->err), or
 * -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command's skb. */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3020
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003021static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003022 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023{
3024 int len = HCI_COMMAND_HDR_SIZE + plen;
3025 struct hci_command_hdr *hdr;
3026 struct sk_buff *skb;
3027
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003029 if (!skb)
3030 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031
3032 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003033 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034 hdr->plen = plen;
3035
3036 if (plen)
3037 memcpy(skb_put(skb, plen), param, plen);
3038
3039 BT_DBG("skb len %d", skb->len);
3040
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003041 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003042
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003043 return skb;
3044}
3045
3046/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003047int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3048 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003049{
3050 struct sk_buff *skb;
3051
3052 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3053
3054 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3055 if (!skb) {
3056 BT_ERR("%s no memory for command", hdev->name);
3057 return -ENOMEM;
3058 }
3059
Johan Hedberg11714b32013-03-05 20:37:47 +02003060 /* Stand-alone HCI commands must be flaged as
3061 * single-command requests.
3062 */
3063 bt_cb(skb)->req.start = true;
3064
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003066 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067
3068 return 0;
3069}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003070
Johan Hedberg71c76a12013-03-05 20:37:46 +02003071/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003072void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3073 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003074{
3075 struct hci_dev *hdev = req->hdev;
3076 struct sk_buff *skb;
3077
3078 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3079
Andre Guedes34739c12013-03-08 11:20:18 -03003080 /* If an error occured during request building, there is no point in
3081 * queueing the HCI command. We can simply return.
3082 */
3083 if (req->err)
3084 return;
3085
Johan Hedberg71c76a12013-03-05 20:37:46 +02003086 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3087 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003088 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3089 hdev->name, opcode);
3090 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003091 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003092 }
3093
3094 if (skb_queue_empty(&req->cmd_q))
3095 bt_cb(skb)->req.start = true;
3096
Johan Hedberg02350a72013-04-03 21:50:29 +03003097 bt_cb(skb)->req.event = event;
3098
Johan Hedberg71c76a12013-03-05 20:37:46 +02003099 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003100}
3101
/* Append a command to a request with no special completion event
 * (event 0: default command-complete/status handling).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3107
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003109void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110{
3111 struct hci_command_hdr *hdr;
3112
3113 if (!hdev->sent_cmd)
3114 return NULL;
3115
3116 hdr = (void *) hdev->sent_cmd->data;
3117
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003118 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119 return NULL;
3120
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003121 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122
3123 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3124}
3125
3126/* Send ACL data */
3127static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3128{
3129 struct hci_acl_hdr *hdr;
3130 int len = skb->len;
3131
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003132 skb_push(skb, HCI_ACL_HDR_SIZE);
3133 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003134 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003135 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3136 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137}
3138
/* Add ACL headers to an (optionally fragmented) skb and queue every
 * fragment on @queue.
 *
 * The head fragment keeps the caller's @flags; continuation fragments
 * are re-flagged ACL_CONT. For AMP controllers the channel handle is
 * used in the header instead of the connection handle.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are handled
	 * individually via frag_list below. */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		/* Unknown controller type: drop silently (skb is leaked to
		 * the caller's ownership; nothing is queued). */
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as an
		 * independent skb. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments never carry the START flag */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3196
3197void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3198{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003199 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003200
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003201 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003202
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003203 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003205 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207
3208/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003209void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210{
3211 struct hci_dev *hdev = conn->hdev;
3212 struct hci_sco_hdr hdr;
3213
3214 BT_DBG("%s len %d", hdev->name, skb->len);
3215
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003216 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217 hdr.dlen = skb->len;
3218
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003219 skb_push(skb, HCI_SCO_HDR_SIZE);
3220 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003221 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003223 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003224
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003226 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228
3229/* ---- HCI TX task (outgoing data) ---- */
3230
3231/* HCI Connection scheduler */
/* HCI Connection scheduler: pick the connection of link type @type that
 * has queued data and the fewest outstanding packets, and compute its
 * fair share of the controller's free buffer slots into *quote.
 *
 * Returns the chosen connection, or NULL (with *quote = 0) when no
 * eligible connection exists.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip wrong link types and connections with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy connection (lowest sent count) */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the free-slot counter matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_mtu == 0) share the ACL pool. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Divide slots evenly; guarantee at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3291
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003292static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293{
3294 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003295 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296
Ville Tervobae1f5d92011-02-10 22:38:53 -03003297 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003299 rcu_read_lock();
3300
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003302 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003303 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003304 BT_ERR("%s killing stalled connection %pMR",
3305 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003306 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 }
3308 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003309
3310 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311}
3312
/* Channel-level scheduler: across all connections of link type @type,
 * pick the channel whose head packet has the highest priority, breaking
 * ties in favour of the connection with the fewest outstanding packets.
 * The channel's fair share of free controller slots is written to
 * *quote. Returns NULL when nothing is eligible.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness tracking
			 * at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, prefer the least-busy
			 * connection. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the free-slot counter matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffers (le_mtu == 0) -> share ACL pool */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Divide slots evenly over the contenders; at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3394
/* Anti-starvation pass: for every connection of link type @type that got
 * no packets sent in the last scheduling round (chan->sent == 0), bump
 * the head packet of its queued channels to just below HCI_PRIO_MAX so
 * it competes at top priority next round. Channels that did send are
 * reset (sent = 0) and skipped.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round: clear its counter
			 * and leave its priorities untouched. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) the promotion level */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3444
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003445static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3446{
3447 /* Calculate count of blocks used by this packet */
3448 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3449}
3450
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003451static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 if (!test_bit(HCI_RAW, &hdev->flags)) {
3454 /* ACL tx timeout must be longer than maximum
3455 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003456 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003457 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003458 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003460}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461
/* Packet-based ACL scheduler: repeatedly pick the best channel via
 * hci_chan_sent() and send up to its quote of packets, stopping a
 * channel's burst early if a lower-priority packet reaches the head of
 * its queue. Runs until controller buffers (acl_cnt) are exhausted or
 * no channel has data.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Kick stalled links before trying to schedule */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Commit: actually remove the peeked packet */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed; count it against
			 * both the channel and its connection. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance priorities for starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3499
/* Block-based ACL scheduler (flow control counted in data blocks rather
 * than packets; used by AMP-capable controllers). Mirrors
 * hci_sched_acl_pkt() but debits block_cnt by the number of blocks each
 * packet occupies.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	/* Kick stalled links before trying to schedule */
	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links; BR/EDR schedule ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Packet doesn't fit in the remaining blocks.
			 * NOTE(review): skb was already dequeued here and is
			 * neither sent, requeued nor freed on this path —
			 * confirm whether this drop is intended. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Account in blocks, not packets */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance priorities for starved channels */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3553
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003554static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003555{
3556 BT_DBG("%s", hdev->name);
3557
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003558 /* No ACL link over BR/EDR controller */
3559 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3560 return;
3561
3562 /* No AMP link over AMP controller */
3563 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003564 return;
3565
3566 switch (hdev->flow_ctl_mode) {
3567 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3568 hci_sched_acl_pkt(hdev);
3569 break;
3570
3571 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3572 hci_sched_acl_blk(hdev);
3573 break;
3574 }
3575}
3576
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003578static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579{
3580 struct hci_conn *conn;
3581 struct sk_buff *skb;
3582 int quote;
3583
3584 BT_DBG("%s", hdev->name);
3585
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003586 if (!hci_conn_num(hdev, SCO_LINK))
3587 return;
3588
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3590 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3591 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003592 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593
3594 conn->sent++;
3595 if (conn->sent == ~0)
3596 conn->sent = 0;
3597 }
3598 }
3599}
3600
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003601static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003602{
3603 struct hci_conn *conn;
3604 struct sk_buff *skb;
3605 int quote;
3606
3607 BT_DBG("%s", hdev->name);
3608
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003609 if (!hci_conn_num(hdev, ESCO_LINK))
3610 return;
3611
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003612 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3613 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003614 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3615 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003616 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003617
3618 conn->sent++;
3619 if (conn->sent == ~0)
3620 conn->sent = 0;
3621 }
3622 }
3623}
3624
/* LE scheduler: like hci_sched_acl_pkt() but operating on the LE buffer
 * pool; controllers without dedicated LE buffers (le_pkts == 0) borrow
 * from the ACL pool, and the remaining count is written back to the
 * corresponding counter at the end.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE pool when present, ACL pool otherwise */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Commit: actually remove the peeked packet */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance priorities for starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3675
/* TX work item: run every per-link-type scheduler (unless the device is
 * claimed by a user channel, which bypasses the kernel schedulers) and
 * then flush any raw packets queued for direct transmission.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
3696
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003697/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698
3699/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003700static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701{
3702 struct hci_acl_hdr *hdr = (void *) skb->data;
3703 struct hci_conn *conn;
3704 __u16 handle, flags;
3705
3706 skb_pull(skb, HCI_ACL_HDR_SIZE);
3707
3708 handle = __le16_to_cpu(hdr->handle);
3709 flags = hci_flags(handle);
3710 handle = hci_handle(handle);
3711
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003712 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003713 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714
3715 hdev->stat.acl_rx++;
3716
3717 hci_dev_lock(hdev);
3718 conn = hci_conn_hash_lookup_handle(hdev, handle);
3719 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003720
Linus Torvalds1da177e2005-04-16 15:20:36 -07003721 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003722 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003723
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003725 l2cap_recv_acldata(conn, skb, flags);
3726 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003728 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003729 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730 }
3731
3732 kfree_skb(skb);
3733}
3734
3735/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003736static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737{
3738 struct hci_sco_hdr *hdr = (void *) skb->data;
3739 struct hci_conn *conn;
3740 __u16 handle;
3741
3742 skb_pull(skb, HCI_SCO_HDR_SIZE);
3743
3744 handle = __le16_to_cpu(hdr->handle);
3745
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003746 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747
3748 hdev->stat.sco_rx++;
3749
3750 hci_dev_lock(hdev);
3751 conn = hci_conn_hash_lookup_handle(hdev, handle);
3752 hci_dev_unlock(hdev);
3753
3754 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003756 sco_recv_scodata(conn, skb);
3757 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003759 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003760 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761 }
3762
3763 kfree_skb(skb);
3764}
3765
Johan Hedberg9238f362013-03-05 20:37:48 +02003766static bool hci_req_is_complete(struct hci_dev *hdev)
3767{
3768 struct sk_buff *skb;
3769
3770 skb = skb_peek(&hdev->cmd_q);
3771 if (!skb)
3772 return true;
3773
3774 return bt_cb(skb)->req.start;
3775}
3776
Johan Hedberg42c6b122013-03-05 20:37:49 +02003777static void hci_resend_last(struct hci_dev *hdev)
3778{
3779 struct hci_command_hdr *sent;
3780 struct sk_buff *skb;
3781 u16 opcode;
3782
3783 if (!hdev->sent_cmd)
3784 return;
3785
3786 sent = (void *) hdev->sent_cmd->data;
3787 opcode = __le16_to_cpu(sent->opcode);
3788 if (opcode == HCI_OP_RESET)
3789 return;
3790
3791 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3792 if (!skb)
3793 return;
3794
3795 skb_queue_head(&hdev->cmd_q, skb);
3796 queue_work(hdev->workqueue, &hdev->cmd_work);
3797}
3798
/* Handle completion of @opcode with @status in the context of an
 * asynchronous request: decide whether the whole request is finished,
 * flush the remaining queued commands of a failed request, and invoke
 * the request's completion callback exactly once.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at (and put back) the first command of the NEXT
		 * request; only this request's commands are discarded. */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* The callback travels on the request's commands; keep the
		 * last one seen while discarding them. */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3864
/* RX work item: drain hdev->rx_q, mirroring every packet to the monitor
 * and (in promiscuous mode) to the sockets, then dispatch it to the
 * matching protocol handler by packet type. Data packets are discarded
 * while in raw/user-channel mode or during controller init.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode or a user channel owns the device: the kernel
		 * stack must not process packets itself. */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown type: drop */
			kfree_skb(skb);
			break;
		}
	}
}
3920
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003921static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003923 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924 struct sk_buff *skb;
3925
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003926 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3927 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928
Linus Torvalds1da177e2005-04-16 15:20:36 -07003929 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003930 if (atomic_read(&hdev->cmd_cnt)) {
3931 skb = skb_dequeue(&hdev->cmd_q);
3932 if (!skb)
3933 return;
3934
Wei Yongjun7585b972009-02-25 18:29:52 +08003935 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003937 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003938 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003940 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003941 if (test_bit(HCI_RESET, &hdev->flags))
3942 del_timer(&hdev->cmd_timer);
3943 else
3944 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003945 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003946 } else {
3947 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003948 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 }
3950 }
3951}