author		Jan Kara <jack@suse.cz>	2018-12-28 00:39:05 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 12:11:51 -0800
commit		cc4f11e69fd00c61c38619759b07d00631bda5ca (patch)
tree		24eca7a3290201f5534870d26c70dbd5f1f28020 /mm
parent		0b3901b38d9d916f634e903ce7cd2a8ddd5b1559 (diff)
mm: migrate: lock buffers before migrate_page_move_mapping()
Lock buffers before calling into migrate_page_move_mapping() so that that function doesn't have to know about buffers (which is somewhat unexpected anyway) and all the buffer head logic is in buffer_migrate_page().

Link: http://lkml.kernel.org/r/20181211172143.7358-3-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
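For readability, the following is a condensed sketch of buffer_migrate_page() as it looks after this patch, pieced together from the diff below. It is not the verbatim resulting function: the middle section (moving page private data, re-pointing the buffers at the new page, and copying page state) and the per-buffer reference drops are elided, and the comments are added here for illustration.

	int buffer_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
	{
		struct buffer_head *bh, *head;
		int rc;
		int expected_count;

		if (!page_has_buffers(page))
			return migrate_page(mapping, newpage, page, mode);

		/* Bail out early if the page carries unexpected extra references. */
		expected_count = expected_page_refs(page);
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* Lock every buffer head up front, before the mapping is touched. */
		head = page_buffers(page);
		if (!buffer_migrate_lock_buffers(head, mode))
			return -EAGAIN;

		/* The mapping move no longer needs to know about buffers (NULL). */
		rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
		if (rc != MIGRATEPAGE_SUCCESS)
			goto unlock_buffers;

		/* ... move page private data and copy buffer/page state ... */

		rc = MIGRATEPAGE_SUCCESS;
	unlock_buffers:
		bh = head;
		do {
			unlock_buffer(bh);
			/* (per-buffer reference drop elided) */
			bh = bh->b_this_page;
		} while (bh != head);

		return rc;
	}

Note the ordering: buffers are now locked unconditionally before migrate_page_move_mapping() is called, so the latter never has to take or drop buffer locks itself, and the single unlock_buffers label handles both the success and failure paths.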
Diffstat (limited to 'mm')
-rw-r--r--	mm/migrate.c	39
1 file changed, 13 insertions(+), 26 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 94c9ebf1f33e..e0bc03e15e74 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -487,20 +487,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	}
 
 	/*
-	 * In the async migration case of moving a page with buffers, lock the
-	 * buffers using trylock before the mapping is moved. If the mapping
-	 * was moved, we later failed to lock the buffers and could not move
-	 * the mapping back due to an elevated page count, we would have to
-	 * block waiting on other references to be dropped.
-	 */
-	if (mode == MIGRATE_ASYNC && head &&
-			!buffer_migrate_lock_buffers(head, mode)) {
-		page_ref_unfreeze(page, expected_count);
-		xas_unlock_irq(&xas);
-		return -EAGAIN;
-	}
-
-	/*
 	 * Now we know that no one else is looking at the page:
 	 * no turning back from here.
 	 */
@@ -775,24 +761,23 @@ int buffer_migrate_page(struct address_space *mapping,
 {
 	struct buffer_head *bh, *head;
 	int rc;
+	int expected_count;
 
 	if (!page_has_buffers(page))
 		return migrate_page(mapping, newpage, page, mode);
 
-	head = page_buffers(page);
+	/* Check whether page does not have extra refs before we do more work */
+	expected_count = expected_page_refs(page);
+	if (page_count(page) != expected_count)
+		return -EAGAIN;
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
+	head = page_buffers(page);
+	if (!buffer_migrate_lock_buffers(head, mode))
+		return -EAGAIN;
 
+	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
 	if (rc != MIGRATEPAGE_SUCCESS)
-		return rc;
-
-	/*
-	 * In the async case, migrate_page_move_mapping locked the buffers
-	 * with an IRQ-safe spinlock held. In the sync case, the buffers
-	 * need to be locked now
-	 */
-	if (mode != MIGRATE_ASYNC)
-		BUG_ON(!buffer_migrate_lock_buffers(head, mode));
+		goto unlock_buffers;
 
 	ClearPagePrivate(page);
 	set_page_private(newpage, page_private(page));
@@ -814,6 +799,8 @@ int buffer_migrate_page(struct address_space *mapping,
 	else
 		migrate_page_states(newpage, page);
 
+	rc = MIGRATEPAGE_SUCCESS;
+unlock_buffers:
 	bh = head;
 	do {
 		unlock_buffer(bh);
@@ -822,7 +809,7 @@ int buffer_migrate_page(struct address_space *mapping,
 	} while (bh != head);
 
-	return MIGRATEPAGE_SUCCESS;
+	return rc;
 }
 EXPORT_SYMBOL(buffer_migrate_page);
 #endif