path: root/mm/page_alloc.c
diff options
authorMel Gorman <>2009-06-16 15:32:05 -0700
committerLinus Torvalds <>2009-06-16 19:47:34 -0700
commit0a15c3e9f649f71464ac39e6378f1fde6f995322 (patch)
treea2a69331347a3fdfa5d2ab6be3e52e71832ae153 /mm/page_alloc.c
parent728ec980fb9fa2d65d9e05444079a53615985e7b (diff)
page allocator: inline buffered_rmqueue()
buffered_rmqueue() is in the fast path so inline it. Because it only has one call site, this function can then be inlined without causing text bloat. On an x86-based config, it made no difference as the savings were padded out by NOP instructions. Milage varies but text will either decrease in size or remain static. Signed-off-by: Mel Gorman <> Reviewed-by: KOSAKI Motohiro <> Cc: Christoph Lameter <> Cc: Pekka Enberg <> Cc: Peter Zijlstra <> Cc: Nick Piggin <> Cc: Dave Hansen <> Cc: Lee Schermerhorn <> Signed-off-by: Andrew Morton <> Signed-off-by: Linus Torvalds <>
Diffstat (limited to 'mm/page_alloc.c')
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 04713f649fd..c101921e6a6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1072,7 +1072,8 @@ void split_page(struct page *page, unsigned int order)
* we cheat by calling it from here, in the order > 0 path. Saves a branch
* or two.
-static struct page *buffered_rmqueue(struct zone *preferred_zone,
+static inline
+struct page *buffered_rmqueue(struct zone *preferred_zone,
struct zone *zone, int order, gfp_t gfp_flags,
int migratetype)