Diffstat (limited to 'drivers/video/fb_defio.c')

 drivers/video/fb_defio.c | 54 ++++++++++++++++++++----------------------------------
 1 file changed, 20 insertions(+), 34 deletions(-)
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 1105a591dcc..6b93ef93cb1 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -66,7 +66,7 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
return 0;
}
-int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
+int fb_deferred_io_fsync(struct file *file, int datasync)
{
struct fb_info *info = file->private_data;
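
This hunk tracks the tree-wide cleanup that dropped the unused dentry
argument from the ->fsync() prototype; callers that still need the
dentry can reach it via file->f_path.dentry. The fbdev core hooks this
helper into its file_operations roughly as below (reconstructed from
the fbmem.c of this era, so treat it as a sketch rather than a quote):

#ifdef CONFIG_FB_DEFERRED_IO
	.fsync =	fb_deferred_io_fsync,	/* now the two-argument form */
#endif
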
@@ -100,6 +100,16 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
/* protect against the workqueue changing the page list */
mutex_lock(&fbdefio->lock);
+ /*
+ * We want the page to remain locked from ->page_mkwrite until
+ * the PTE is marked dirty to avoid page_mkclean() being called
+ * before the PTE is updated, which would leave the page ignored
+ * by defio.
+ * Do this by locking the page here and informing the caller
+ * about it with VM_FAULT_LOCKED.
+ */
+ lock_page(page);
+
/* we loop through the pagelist before adding in order
to keep the pagelist sorted */
list_for_each_entry(cur, &fbdefio->pagelist, lru) {
@@ -121,7 +131,7 @@ page_already_added:
/* come back after delay to process the deferred IO */
schedule_delayed_work(&info->deferred_work, fbdefio->delay);
- return 0;
+ return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
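
The race these two hunks close: page_mkclean() can only "re-arm" a
page by write-protecting PTEs that are already marked dirty. If the
flush worker runs in the window after ->page_mkwrite() returns but
before the fault core marks the PTE dirty, page_mkclean() finds
nothing to clean and the next write to the page goes unnoticed by
defio. Keeping the page locked across that window, and signalling this
with VM_FAULT_LOCKED so the fault core unlocks it only after updating
the PTE, forces the worker's lock_page() to block until the PTE really
is dirty.

A minimal sketch of the resulting handler shape, assuming the
->page_mkwrite() prototype of this era; the sorted-insert details are
elided and the function name is illustrative:

static int mkwrite_sketch(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* serialize against the flush worker */
	mutex_lock(&fbdefio->lock);

	/* hold the page lock until the fault core has dirtied the PTE */
	lock_page(page);

	/* ... insert 'page' into fbdefio->pagelist, keeping it sorted ... */

	mutex_unlock(&fbdefio->lock);

	/* come back later to flush the accumulated pages */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;	/* the fault core, not us, unlocks the page */
}
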
@@ -155,41 +165,25 @@ static void fb_deferred_io_work(struct work_struct *work)
{
struct fb_info *info = container_of(work, struct fb_info,
deferred_work.work);
+ struct list_head *node, *next;
+ struct page *cur;
struct fb_deferred_io *fbdefio = info->fbdefio;
- struct page *page, *tmp_page;
- struct list_head *node, *tmp_node;
- struct list_head non_dirty;
-
- INIT_LIST_HEAD(&non_dirty);
/* here we mkclean the pages, then do all deferred IO */
mutex_lock(&fbdefio->lock);
- list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
- lock_page(page);
- /*
- * The workqueue callback can be triggered after a
- * ->page_mkwrite() call but before the PTE has been marked
- * dirty. In this case page_mkclean() won't "rearm" the page.
- *
- * To avoid this, remove those "non-dirty" pages from the
- * pagelist before calling the driver's callback, then add
- * them back to get processed on the next work iteration.
- * At that time, their PTEs will hopefully be dirty for real.
- */
- if (!page_mkclean(page))
- list_move_tail(&page->lru, &non_dirty);
- unlock_page(page);
+ list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+ lock_page(cur);
+ page_mkclean(cur);
+ unlock_page(cur);
}
/* driver's callback with pagelist */
fbdefio->deferred_io(info, &fbdefio->pagelist);
- /* clear the list... */
- list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+ /* clear the list */
+ list_for_each_safe(node, next, &fbdefio->pagelist) {
list_del(node);
}
- /* ... and add back the "non-dirty" pages to the list */
- list_splice_tail(&non_dirty, &fbdefio->pagelist);
mutex_unlock(&fbdefio->lock);
}
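
With ->page_mkwrite() now holding the page lock until the PTE is
dirty, the worker's own lock_page() cannot complete mid-fault, so
page_mkclean() always observes a fully dirtied PTE and the non-dirty
side list (and its carry-over between work iterations) becomes
unnecessary.

For context, the pagelist handed to fbdefio->deferred_io() is what a
driver flushes to its device. A hypothetical callback modeled on
existing defio users; example_hw_write is an illustrative name, not a
real API:

static void example_deferred_io(struct fb_info *info,
				struct list_head *pagelist)
{
	struct page *page;

	/* the list is kept sorted by page->index, so offsets ascend */
	list_for_each_entry(page, pagelist, lru) {
		unsigned long offset = page->index << PAGE_SHIFT;

		/* push one page of the shadow framebuffer to the device */
		example_hw_write(info, offset, PAGE_SIZE);
	}
}
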
@@ -218,7 +212,6 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
- struct list_head *node, *tmp_node;
struct page *page;
int i;
@@ -226,13 +219,6 @@ void fb_deferred_io_cleanup(struct fb_info *info)
cancel_delayed_work(&info->deferred_work);
flush_scheduled_work();
- /* the list may have still some non-dirty pages at this point */
- mutex_lock(&fbdefio->lock);
- list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
- list_del(node);
- }
- mutex_unlock(&fbdefio->lock);
-
/* clear out the mapping that we setup */
for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
page = fb_deferred_io_page(info, i);
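
With the carry-over gone, cleanup no longer needs to drain left-over
non-dirty pages from the list. The remaining loop undoes the per-page
mapping set up at init time; for reference, the helper it calls
resolves a framebuffer byte offset to its backing struct page,
whether the framebuffer is vmalloc'ed or physically contiguous. A
sketch reconstructed from the fb_defio.c of this era:

static struct page *fb_deferred_io_page(struct fb_info *info,
					unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs)
				   >> PAGE_SHIFT);

	return page;
}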