author	Jan Kara <jack@suse.com>	2015-08-06 15:46:42 -0700
committer	Sasha Levin <sasha.levin@oracle.com>	2015-08-19 13:16:07 -0400
commit	81f2af92d90824ba6a37157118d5bf997fc5f4d9 (patch)
tree	460725c0e2977d2abb15376c3fd7457725e398e5 /fs
parent	204041c4a640f004a5df152434f05b96ee76dfab (diff)
fsnotify: fix oops in fsnotify_clear_marks_by_group_flags()
[ Upstream commit 8f2f3eb59dff4ec538de55f2e0592fec85966aab ]

fsnotify_clear_marks_by_group_flags() can race with fsnotify_destroy_marks() so that when fsnotify_destroy_mark_locked() drops mark_mutex, a mark from the list iterated by fsnotify_clear_marks_by_group_flags() can be freed; the next-entry pointer we have cached then becomes stale and we dereference freed memory.

Fix the problem by first moving the marks to be freed to a special private list and then always freeing the first entry in that list. This method is safe even though entries can disappear from the list whenever we drop the lock.

Signed-off-by: Jan Kara <jack@suse.com>
Reported-by: Ashish Sangwan <a.sangwan@samsung.com>
Reviewed-by: Ashish Sangwan <a.sangwan@samsung.com>
Cc: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
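To make the race concrete, here is a minimal userspace C sketch of the unsafe pattern described above. This is not the kernel code: the list, the lock, and the names (node, list_lock, destroy_node_locked, clear_nodes_by_flags) are hypothetical stand-ins for group->marks_list, mark_mutex, fsnotify_destroy_mark_locked() and fsnotify_clear_marks_by_group_flags().

/* Userspace sketch of the RACY pattern; all names are hypothetical. */
#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *prev, *next;
	int flags;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Stand-in for fsnotify_destroy_mark_locked(): unlinks the node, then
 * has to drop list_lock to do teardown work. */
static void destroy_node_locked(struct node *n)
{
	if (n->prev)
		n->prev->next = n->next;
	else
		head = n->next;
	if (n->next)
		n->next->prev = n->prev;

	pthread_mutex_unlock(&list_lock);
	free(n);			/* teardown runs without the lock */
	pthread_mutex_lock(&list_lock);
}

static void clear_nodes_by_flags(int flags)
{
	struct node *n, *next;

	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = next) {
		next = n->next;		/* cached next pointer */
		if (n->flags & flags)
			destroy_node_locked(n);
		/*
		 * RACE: while destroy_node_locked() had list_lock
		 * dropped, another thread may have freed the node that
		 * 'next' still points to, so the next iteration of this
		 * loop can dereference freed memory.
		 */
	}
	pthread_mutex_unlock(&list_lock);
}

The cached next pointer is only trustworthy while the lock is held continuously; any drop inside the loop body invalidates it.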
Diffstat (limited to 'fs')
-rw-r--r--	fs/notify/mark.c	30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 34c38fabf514..28c90a6fcd6c 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -329,16 +329,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
unsigned int flags)
{
struct fsnotify_mark *lmark, *mark;
+ LIST_HEAD(to_free);
+ /*
+ * We have to be really careful here. Anytime we drop mark_mutex,
+ * e.g. fsnotify_clear_marks_by_inode() can come and free marks, even
+ * marks on our to_free list, so we have to hold mark_mutex even when
+ * accessing that list. And freeing a mark requires us to drop
+ * mark_mutex. So we can reliably free only the first mark in the
+ * list. That's why we first move the marks to be freed to to_free in
+ * one go and then free the marks from to_free one by one.
+ */
mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
- if (mark->flags & flags) {
- fsnotify_get_mark(mark);
- fsnotify_destroy_mark_locked(mark, group);
- fsnotify_put_mark(mark);
- }
+ if (mark->flags & flags)
+ list_move(&mark->g_list, &to_free);
}
mutex_unlock(&group->mark_mutex);
+
+ while (1) {
+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+ if (list_empty(&to_free)) {
+ mutex_unlock(&group->mark_mutex);
+ break;
+ }
+ mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+ fsnotify_get_mark(mark);
+ fsnotify_destroy_mark_locked(mark, group);
+ mutex_unlock(&group->mark_mutex);
+ fsnotify_put_mark(mark);
+ }
}
/*
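For contrast, here is a userspace sketch of the two-pass pattern the patch adopts, using the same hypothetical names as the sketch above. One point the sketch can only mirror, not reproduce: in the kernel, marks moved to to_free remain reachable through other paths (the patch's own comment names fsnotify_clear_marks_by_inode()), which is why mark_mutex must be retaken even just to pop the first entry.

/* Userspace sketch of the FIXED pattern; all names are hypothetical. */
#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *prev, *next;
	int flags;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void clear_nodes_by_flags(int flags)
{
	struct node *n, *next;
	struct node *to_free = NULL;	/* private singly-linked list */

	/* Pass 1: in one lock hold, move every matching node from the
	 * shared list onto the private to_free list. */
	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = next) {
		next = n->next;
		if (!(n->flags & flags))
			continue;
		if (n->prev)			/* unlink from shared list */
			n->prev->next = n->next;
		else
			head = n->next;
		if (n->next)
			n->next->prev = n->prev;
		n->next = to_free;		/* push onto to_free */
		to_free = n;
	}
	pthread_mutex_unlock(&list_lock);

	/* Pass 2: repeatedly take the lock, detach only the first entry,
	 * then drop the lock to free it. After any lock drop, the first
	 * entry is the only one that can still be trusted. */
	for (;;) {
		pthread_mutex_lock(&list_lock);
		if (!to_free) {
			pthread_mutex_unlock(&list_lock);
			break;
		}
		n = to_free;
		to_free = n->next;
		pthread_mutex_unlock(&list_lock);
		free(n);		/* teardown outside the lock */
	}
}

The design choice is that freeing work which must happen without the lock is confined to the list head: no pointer into the middle of the list ever survives a lock drop.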