/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"

/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);


/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, pos->cl))
			return pos;
	return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		goto out_unlock;
	}

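	/*
	 * Pick the first free host client id from the per-device bitmap;
	 * the bit is set below and released again in mei_release().
	 */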
	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "client_id exceeded %d",
			MEI_CLIENTS_MAX);
		goto out_unlock;
	}

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
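	/* the iamthif (AMTHI) client is torn down by its own helper */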
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	if (cl->state == MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTING;
		dev_dbg(&dev->pdev->dev,
			"disconnecting client host client = %d, "
			"ME client = %d\n",
			cl->host_client_id,
			cl->me_client_id);
		rets = mei_disconnect_host_client(dev, cl);
	}
	mei_cl_flush_queues(cl);
	dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
		cl->host_client_id,
		cl->me_client_id);

	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	mei_me_cl_unlink(dev, cl);

	/* free read cb */
	cb = NULL;
	if (cl->read_cb) {
		cb = find_read_list_entry(dev, cl);
		/* Remove entry from read list */
		if (cb)
			list_del(&cb->list);

		cb = cl->read_cb;
		cl->read_cb = NULL;
	}

	file->private_data = NULL;

	if (cb) {
		mei_io_cb_free(cb);
		cb = NULL;
	}

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

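	/*
	 * Reads from the watchdog client are refused unless the caller has
	 * first sent a watchdog independence message (tracked in
	 * cl->sm_state by mei_write()).
	 */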
	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

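	/*
	 * A completed read callback may be consumed over several read()
	 * calls: keep copying while *offset lies inside the buffer, free
	 * the callback once it is fully consumed, and reset *offset when a
	 * new read cycle starts.
	 */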
	if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
		   cl->read_cb->buf_idx <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

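	/*
	 * No data is ready yet: return -EAGAIN for non-blocking files,
	 * otherwise drop the device lock and sleep until the read
	 * completes or the client is disconnected.
	 */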
	if (MEI_READ_COMPLETE != cl->reading_state &&
			!waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
		cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto err;
	}

	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto err;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EMSGSIZE;
		goto err;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		goto err;
	}
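	/*
	 * For the iamthif client, drop a previously queued read entry for
	 * this file if it has timed out or was already completed.
	 */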
	if (cl == &dev->iamthif_cl) {
		write_cb = mei_amthif_find_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;


	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto err;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto err;

	dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);

	if (copy_from_user(write_cb->request_buffer.data, ubuf, length)) {
		rets = -EFAULT;
		goto err;
	}

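	/*
	 * If the payload matches one of the watchdog independence
	 * messages, note it in cl->sm_state so that a subsequent read
	 * from the watchdog client is permitted (see mei_read()).
	 */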
	cl->sm_state = 0;
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
		     write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(dev, write_cb);

		if (rets) {
			dev_err(&dev->pdev->dev,
				"amthi write failed with status = %d\n", rets);
			goto err;
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	write_cb->fop_type = MEI_FOP_WRITE;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
		cl->host_client_id, cl->me_client_id);
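	/*
	 * Transmission is credit based: with no flow-control credit, or
	 * while the host buffer is busy, the request is only queued on the
	 * write list to be completed later.
	 */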
	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto err;

	if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
		write_cb->buf_idx = 0;
		mei_hdr.msg_complete = 0;
		cl->writing_state = MEI_WRITING;
		goto out;
	}

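	/*
	 * A message larger than the host buffer is sent in fragments:
	 * only the first chunk goes out here, the remainder is finished
	 * from the write list.
	 */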
	dev->mei_host_buffer_is_empty = false;
	if (length > mei_hbuf_max_data(dev)) {
		mei_hdr.length = mei_hbuf_max_data(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = length;
		mei_hdr.msg_complete = 1;
	}
	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
		*((u32 *) &mei_hdr));
	if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) {
		rets = -ENODEV;
		goto err;
	}
	cl->writing_state = MEI_WRITING;
	write_cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		if (mei_flow_ctrl_reduce(dev, cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&write_cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&write_cb->list, &dev->write_list.list);
	}

	mutex_unlock(&dev->device_lock);
	return length;

err:
	mutex_unlock(&dev->device_lock);
	mei_io_cb_free(write_cb);
	return rets;
}


/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
			       GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copying the data back to user. */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			     unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;


	if (cl == &dev->iamthif_cl) {
		mask = mei_amthif_poll(dev, file, wait);
		goto out;
	}

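	/* for regular clients the mask is raised once the pending write completes */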
	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};


/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

/**
 * mei_quirk_probe - probe for devices that don't have a valid ME interface
 * @pdev: PCI device structure
 * @ent: entry into pci_device_table
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool mei_quirk_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	u32 reg;
	if (ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, 0x48, &reg);
		/* make sure that bit 9 is up and bit 10 is down */
		if ((reg & 0x600) == 0x200) {
			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
			return false;
		}
	}
	return true;
}
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

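	/*
	 * With MSI only the threaded handler is registered (no quick
	 * check); a legacy/shared interrupt registers both the quick
	 * handler and the threaded handler.
	 */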
	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	INIT_WORK(&dev->init_work, mei_host_client_init);

	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);


	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

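	/* disconnect the fixed iamthif and watchdog clients before teardown */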
	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_me_cl_unlink(dev, &dev->wd_cl);
	mei_me_cl_unlink(dev, &dev->iamthif_cl);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop watchdog if exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
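	/* bring the device back up: flag power-up and reset the device */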
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}
static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = mei_remove,
	.shutdown = mei_remove,
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");